diff --git a/client/src/builder.rs b/client/src/builder.rs index defb8465f..52dc282ad 100644 --- a/client/src/builder.rs +++ b/client/src/builder.rs @@ -47,7 +47,7 @@ pub struct ClientBuilder { impl Default for ClientBuilder { fn default() -> Self { ClientBuilder { - config: ClientConfig::default() + config: ClientConfig::default(), } } } @@ -59,9 +59,12 @@ impl ClientBuilder { } /// Creates a `ClientBuilder` using a configuration file as the initial state. - pub fn from_config(path: T) -> Result where T: Into { + pub fn from_config(path: T) -> Result + where + T: Into, + { Ok(ClientBuilder { - config: ClientConfig::load(&path.into())? + config: ClientConfig::load(&path.into())?, }) } @@ -90,19 +93,28 @@ impl ClientBuilder { } /// Sets the application name. - pub fn application_name(mut self, application_name: T) -> Self where T: Into { + pub fn application_name(mut self, application_name: T) -> Self + where + T: Into, + { self.config.application_name = application_name.into(); self } /// Sets the application uri - pub fn application_uri(mut self, application_uri: T) -> Self where T: Into { + pub fn application_uri(mut self, application_uri: T) -> Self + where + T: Into, + { self.config.application_uri = application_uri.into(); self } /// Sets the product uri. - pub fn product_uri(mut self, product_uri: T) -> Self where T: Into { + pub fn product_uri(mut self, product_uri: T) -> Self + where + T: Into, + { self.config.product_uri = product_uri.into(); self } @@ -117,7 +129,10 @@ impl ClientBuilder { /// Sets a custom client certificate path. The path is required to be provided as a partial /// path relative to the PKI directory. If set, this path will be used to read the client /// certificate from disk. The certificate can be in either the .der or .pem format. - pub fn certificate_path(mut self, certificate_path: T) -> Self where T: Into { + pub fn certificate_path(mut self, certificate_path: T) -> Self + where + T: Into, + { self.config.certificate_path = Some(certificate_path.into()); self } @@ -125,7 +140,10 @@ impl ClientBuilder { /// Sets a custom private key path. The path is required to be provided as a partial path /// relative to the PKI directory. If set, this path will be used to read the private key /// from disk. - pub fn private_key_path(mut self, private_key_path: T) -> Self where T: Into { + pub fn private_key_path(mut self, private_key_path: T) -> Self + where + T: Into, + { self.config.private_key_path = Some(private_key_path.into()); self } @@ -141,7 +159,10 @@ impl ClientBuilder { /// Sets the pki directory where client's own key pair is stored and where `/trusted` and /// `/rejected` server certificates are stored. - pub fn pki_dir(mut self, pki_dir: T) -> Self where T: Into { + pub fn pki_dir(mut self, pki_dir: T) -> Self + where + T: Into, + { self.config.pki_dir = pki_dir.into(); self } @@ -154,27 +175,39 @@ impl ClientBuilder { } /// Sets the id of the default endpoint to connect to. - pub fn default_endpoint(mut self, endpoint_id: T) -> Self where T: Into { + pub fn default_endpoint(mut self, endpoint_id: T) -> Self + where + T: Into, + { self.config.default_endpoint = endpoint_id.into(); self } /// Adds an endpoint to the list of endpoints the client knows of. 
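// Usage sketch (not part of the diff): illustrates the fluent `ClientBuilder`
// API reformatted above. The application name/uri, endpoint id, URL and
// credentials are illustrative, and both the `opcua_client` crate path and the
// final `client()` finisher (assumed to return `Option<Client>`) come from the
// wider crate rather than from this hunk.
use opcua_client::prelude::*;

fn build_client() -> Option<Client> {
    ClientBuilder::new()
        .application_name("Example Client")
        .application_uri("urn:example-client")
        .pki_dir("./pki")
        .endpoint("sample", ClientEndpoint::new("opc.tcp://localhost:4855/"))
        .default_endpoint("sample")
        .user_token("sample_user", ClientUserToken::user_pass("user", "pass"))
        .client() // assumed finisher that validates the config and yields a Client
}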
- pub fn endpoint(mut self, endpoint_id: T, endpoint: ClientEndpoint) -> Self where T: Into { + pub fn endpoint(mut self, endpoint_id: T, endpoint: ClientEndpoint) -> Self + where + T: Into, + { self.config.endpoints.insert(endpoint_id.into(), endpoint); self } /// Adds multiple endpoints to the list of endpoints the client knows of. - pub fn endpoints(mut self, endpoints: Vec<(T, ClientEndpoint)>) -> Self where T: Into { + pub fn endpoints(mut self, endpoints: Vec<(T, ClientEndpoint)>) -> Self + where + T: Into, + { for e in endpoints { self.config.endpoints.insert(e.0.into(), e.1); - }; + } self } /// Adds a user token to the list supported by the client. - pub fn user_token(mut self, user_token_id: T, user_token: ClientUserToken) -> Self where T: Into { + pub fn user_token(mut self, user_token_id: T, user_token: ClientUserToken) -> Self + where + T: Into, + { let user_token_id = user_token_id.into(); if user_token_id == ANONYMOUS_USER_TOKEN_ID { panic!("User token id {} is reserved", user_token_id); @@ -237,7 +270,10 @@ fn client_builder() { assert_eq!(c.private_key_path, Some(PathBuf::from("keyxyz"))); assert_eq!(c.trust_server_certs, true); assert_eq!(c.pki_dir, PathBuf::from_str("pkixyz").unwrap()); - assert_eq!(c.preferred_locales, vec!["a".to_string(), "b".to_string(), "c".to_string()]); + assert_eq!( + c.preferred_locales, + vec!["a".to_string(), "b".to_string(), "c".to_string()] + ); assert_eq!(c.default_endpoint, "http://default"); assert_eq!(c.session_retry_interval, 1234); assert_eq!(c.session_retry_limit, 999); diff --git a/client/src/callbacks.rs b/client/src/callbacks.rs index c35fe3712..c846d88ae 100644 --- a/client/src/callbacks.rs +++ b/client/src/callbacks.rs @@ -11,13 +11,9 @@ //! [`DataChangeCallback`]: ./struct.DataChangeCallback.html //! [`EventCallback`]: ./struct.EventCallback.html - use std::fmt; -use opcua_types::{ - service_types::EventNotificationList, - status_code::StatusCode, -}; +use opcua_types::{service_types::EventNotificationList, status_code::StatusCode}; use crate::subscription::MonitoredItem; @@ -51,7 +47,6 @@ pub trait OnConnectionStatusChange { fn on_connection_status_change(&mut self, connected: bool); } - /// The `OnSessionClosed` trait can be used to register on a session and called to notify the client /// that the session has closed. pub trait OnSessionClosed { @@ -69,7 +64,7 @@ pub trait OnSessionClosed { /// a data change occurs. pub struct DataChangeCallback { /// The actual call back - cb: Box) + Send + Sync + 'static> + cb: Box) + Send + Sync + 'static>, } impl OnSubscriptionNotification for DataChangeCallback { @@ -80,10 +75,11 @@ impl OnSubscriptionNotification for DataChangeCallback { impl DataChangeCallback { /// Constructs a callback from the supplied function - pub fn new(cb: CB) -> Self where CB: Fn(Vec<&MonitoredItem>) + Send + Sync + 'static { - Self { - cb: Box::new(cb) - } + pub fn new(cb: CB) -> Self + where + CB: Fn(Vec<&MonitoredItem>) + Send + Sync + 'static, + { + Self { cb: Box::new(cb) } } } @@ -91,7 +87,7 @@ impl DataChangeCallback { /// when an event occurs. 
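// Usage sketch (not part of the diff): constructing the notification callback
// wrappers shown in callbacks.rs from plain closures. The subscription-creation
// calls these would be handed to live outside this hunk, so only construction
// is shown; the `opcua_client` crate path is assumed.
use opcua_client::prelude::*;

fn make_callbacks() -> (DataChangeCallback, EventCallback) {
    let data_change = DataChangeCallback::new(|items: Vec<&MonitoredItem>| {
        // Only the count is printed so no accessors beyond what this diff
        // shows are assumed on `MonitoredItem`.
        println!("{} monitored item(s) changed", items.len());
    });
    let events = EventCallback::new(|notifications: &EventNotificationList| {
        println!("received an event notification list: {:?}", notifications);
    });
    (data_change, events)
}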
pub struct EventCallback { /// The actual call back - cb: Box + cb: Box, } impl OnSubscriptionNotification for EventCallback { @@ -102,10 +98,11 @@ impl OnSubscriptionNotification for EventCallback { impl EventCallback { /// Constructs a callback from the supplied function - pub fn new(cb: CB) -> Self where CB: Fn(&EventNotificationList) + Send + Sync + 'static { - Self { - cb: Box::new(cb) - } + pub fn new(cb: CB) -> Self + where + CB: Fn(&EventNotificationList) + Send + Sync + 'static, + { + Self { cb: Box::new(cb) } } } @@ -133,10 +130,11 @@ impl OnConnectionStatusChange for ConnectionStatusCallback { impl ConnectionStatusCallback { // Constructor - pub fn new(cb: CB) -> Self where CB: FnMut(bool) + Send + Sync + 'static { - Self { - cb: Box::new(cb) - } + pub fn new(cb: CB) -> Self + where + CB: FnMut(bool) + Send + Sync + 'static, + { + Self { cb: Box::new(cb) } } } @@ -154,9 +152,10 @@ impl OnSessionClosed for SessionClosedCallback { impl SessionClosedCallback { // Constructor - pub fn new(cb: CB) -> Self where CB: FnMut(StatusCode) + Send + Sync + 'static { - Self { - cb: Box::new(cb) - } + pub fn new(cb: CB) -> Self + where + CB: FnMut(StatusCode) + Send + Sync + 'static, + { + Self { cb: Box::new(cb) } } } diff --git a/client/src/client.rs b/client/src/client.rs index b285eff84..0ee8ea2e4 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -11,23 +11,21 @@ use std::{ }; use opcua_core::{ - config::Config, comms::url::{ - hostname_from_url, is_opc_ua_binary_url, is_valid_opc_ua_url, server_url_from_endpoint_url, url_matches, - url_matches_except_host, url_with_replaced_hostname, + hostname_from_url, is_opc_ua_binary_url, is_valid_opc_ua_url, server_url_from_endpoint_url, + url_matches, url_matches_except_host, url_with_replaced_hostname, }, + config::Config, }; use opcua_crypto::{CertificateStore, SecurityPolicy}; use opcua_types::{ + service_types::{ApplicationDescription, EndpointDescription, RegisteredServer}, + status_code::StatusCode, MessageSecurityMode, - service_types::{ - ApplicationDescription, EndpointDescription, RegisteredServer, - }, - status_code::StatusCode }; use crate::{ - config::{ANONYMOUS_USER_TOKEN_ID, ClientConfig, ClientEndpoint}, + config::{ClientConfig, ClientEndpoint, ANONYMOUS_USER_TOKEN_ID}, session::{Session, SessionInfo}, session_retry::SessionRetryPolicy, }; @@ -72,14 +70,14 @@ pub struct Client { impl Drop for Client { fn drop(&mut self) { -// TODO - this causes panics on unwrap - have to figure the reason out -// for session in self.sessions.iter_mut() { -// // Disconnect -// let mut session = trace_write_lock_unwrap!(session.session); -// if session.is_connected() { -// session.disconnect() -// } -// } + // TODO - this causes panics on unwrap - have to figure the reason out + // for session in self.sessions.iter_mut() { + // // Disconnect + // let mut session = trace_write_lock_unwrap!(session.session); + // if session.is_connected() { + // session.disconnect() + // } + // } } } @@ -115,10 +113,19 @@ impl Client { /// [`ClientBuilder`]: ../config/struct.ClientBuilder.html /// pub fn new(config: ClientConfig) -> Client { - let application_description = if config.create_sample_keypair { Some(config.application_description()) } else { None }; + let application_description = if config.create_sample_keypair { + Some(config.application_description()) + } else { + None + }; - let (mut certificate_store, client_certificate, client_pkey) = CertificateStore::new_with_keypair( - &config.pki_dir, config.certificate_path.as_deref(), 
config.private_key_path.as_deref(), application_description); + let (mut certificate_store, client_certificate, client_pkey) = + CertificateStore::new_with_keypair( + &config.pki_dir, + config.certificate_path.as_deref(), + config.private_key_path.as_deref(), + application_description, + ); if client_certificate.is_none() || client_pkey.is_none() { error!("Client is missing its application instance certificate and/or its private key. Encrypted endpoints will not function correctly.") } @@ -138,7 +145,11 @@ impl Client { // Never try 0 => SessionRetryPolicy::never(session_timeout), // Try this many times - session_retry_limit => SessionRetryPolicy::new(session_timeout, session_retry_limit as u32, config.session_retry_interval) + session_retry_limit => SessionRetryPolicy::new( + session_timeout, + session_retry_limit as u32, + config.session_retry_interval, + ), }; Client { @@ -168,20 +179,28 @@ impl Client { /// /// [`Session`]: ../session/struct.Session.html /// - pub fn connect_to_endpoint_id(&mut self, endpoint_id: Option<&str>) -> Result>, StatusCode> { + pub fn connect_to_endpoint_id( + &mut self, + endpoint_id: Option<&str>, + ) -> Result>, StatusCode> { // Ask the server associated with the default endpoint for its list of endpoints let endpoints = match self.get_server_endpoints() { Result::Err(status_code) => { error!("Cannot get endpoints for server, error - {}", status_code); return Err(status_code); } - Result::Ok(endpoints) => endpoints + Result::Ok(endpoints) => endpoints, }; info!("Server has these endpoints:"); - endpoints.iter().for_each(|e| info!(" {} - {:?} / {:?}", e.endpoint_url, - SecurityPolicy::from_str(e.security_policy_uri.as_ref()).unwrap(), - e.security_mode)); + endpoints.iter().for_each(|e| { + info!( + " {} - {:?} / {:?}", + e.endpoint_url, + SecurityPolicy::from_str(e.security_policy_uri.as_ref()).unwrap(), + e.security_mode + ) + }); // Create a session to an endpoint. 
If an endpoint id is specified use that let session = if let Some(endpoint_id) = endpoint_id { @@ -194,7 +213,10 @@ impl Client { // Connect to the server let mut session = session.write().unwrap(); if let Err(result) = session.connect_and_activate() { - error!("Got an error while creating the default session - {}", result); + error!( + "Got an error while creating the default session - {}", + result + ); } } @@ -212,37 +234,58 @@ impl Client { /// /// [`Session`]: ../session/struct.Session.html /// - pub fn connect_to_endpoint(&mut self, endpoint: T, user_identity_token: IdentityToken) -> Result>, StatusCode> where T: Into { + pub fn connect_to_endpoint( + &mut self, + endpoint: T, + user_identity_token: IdentityToken, + ) -> Result>, StatusCode> + where + T: Into, + { let endpoint = endpoint.into(); // Get the server endpoints let server_url = server_url_from_endpoint_url(endpoint.endpoint_url.as_ref()) .map_err(|_| StatusCode::BadTcpEndpointUrlInvalid)?; - let server_endpoints = self.get_server_endpoints_from_url(server_url) - .map_err(|status_code| { - error!("Cannot get endpoints for server, error - {}", status_code); - status_code - })?; + let server_endpoints = + self.get_server_endpoints_from_url(server_url) + .map_err(|status_code| { + error!("Cannot get endpoints for server, error - {}", status_code); + status_code + })?; // Find the server endpoint that matches the one desired let security_policy = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) .map_err(|_| StatusCode::BadSecurityPolicyRejected)?; - let server_endpoint = Client::find_server_endpoint(&server_endpoints, endpoint.endpoint_url.as_ref(), security_policy, endpoint.security_mode) - .ok_or(StatusCode::BadTcpEndpointUrlInvalid) - .map_err(|status_code| { - error!("Cannot find matching endpoint for {}", endpoint.endpoint_url.as_ref()); - status_code - })?; + let server_endpoint = Client::find_server_endpoint( + &server_endpoints, + endpoint.endpoint_url.as_ref(), + security_policy, + endpoint.security_mode, + ) + .ok_or(StatusCode::BadTcpEndpointUrlInvalid) + .map_err(|status_code| { + error!( + "Cannot find matching endpoint for {}", + endpoint.endpoint_url.as_ref() + ); + status_code + })?; // Create a session - let session = self.new_session_from_info((server_endpoint, user_identity_token)).unwrap(); + let session = self + .new_session_from_info((server_endpoint, user_identity_token)) + .unwrap(); { // Connect to the server let mut session = session.write().unwrap(); if let Err(result) = session.connect_and_activate() { - error!("Got an error while creating the default session - {}", result); + error!( + "Got an error while creating the default session - {}", + result + ); } } @@ -261,7 +304,10 @@ impl Client { } else if let Some(endpoint) = self.config.endpoints.get(&default_endpoint_id) { Ok(endpoint.clone()) } else { - Err(format!("Cannot find default endpoint with id {}", default_endpoint_id)) + Err(format!( + "Cannot find default endpoint with id {}", + default_endpoint_id + )) } } @@ -270,7 +316,10 @@ impl Client { /// /// [`Session`]: ../session/struct.Session.html /// - pub fn new_session(&mut self, endpoints: &[EndpointDescription]) -> Result>, String> { + pub fn new_session( + &mut self, + endpoints: &[EndpointDescription], + ) -> Result>, String> { let endpoint = self.default_endpoint()?; self.new_session_from_endpoint(&endpoint, endpoints) } @@ -280,8 +329,13 @@ impl Client { /// /// [`Session`]: ../session/struct.Session.html /// - pub fn new_session_from_id(&mut self, endpoint_id: T, 
endpoints: &[EndpointDescription]) -> Result>, String> - where T: Into + pub fn new_session_from_id( + &mut self, + endpoint_id: T, + endpoints: &[EndpointDescription], + ) -> Result>, String> + where + T: Into, { let endpoint_id = endpoint_id.into(); let endpoint = { @@ -301,9 +355,11 @@ impl Client { /// /// [`Session`]: ../session/struct.Session.html /// - fn new_session_from_endpoint(&mut self, client_endpoint: &ClientEndpoint, - endpoints: &[EndpointDescription]) -> Result>, String> - { + fn new_session_from_endpoint( + &mut self, + client_endpoint: &ClientEndpoint, + endpoints: &[EndpointDescription], + ) -> Result>, String> { let session_info = self.session_info_for_endpoint(client_endpoint, endpoints)?; self.new_session_from_info(session_info) } @@ -314,12 +370,26 @@ impl Client { /// /// [`Session`]: ../session/struct.Session.html /// - pub fn new_session_from_info(&mut self, session_info: T) -> Result>, String> where T: Into { + pub fn new_session_from_info( + &mut self, + session_info: T, + ) -> Result>, String> + where + T: Into, + { let session_info = session_info.into(); if !is_opc_ua_binary_url(session_info.endpoint.endpoint_url.as_ref()) { - Err(format!("Endpoint url {}, is not a valid / supported url", session_info.endpoint.endpoint_url)) + Err(format!( + "Endpoint url {}, is not a valid / supported url", + session_info.endpoint.endpoint_url + )) } else { - let session = Arc::new(RwLock::new(Session::new(self.application_description(), self.certificate_store.clone(), session_info, self.session_retry_policy.clone()))); + let session = Arc::new(RwLock::new(Session::new( + self.application_description(), + self.certificate_store.clone(), + session_info, + self.session_retry_policy.clone(), + ))); Ok(session) } } @@ -335,7 +405,10 @@ impl Client { if let Ok(server_url) = server_url_from_endpoint_url(&default_endpoint.url) { self.get_server_endpoints_from_url(server_url) } else { - error!("Cannot create a server url from the specified endpoint url {}", default_endpoint.url); + error!( + "Cannot create a server url from the specified endpoint url {}", + default_endpoint.url + ); Err(StatusCode::BadUnexpectedError) } } else { @@ -365,8 +438,12 @@ impl Client { /// /// [`EndpointDescription`]: ../../opcua_types/service_types/endpoint_description/struct.EndpointDescription.html /// - pub fn get_server_endpoints_from_url(&self, server_url: T) -> Result, StatusCode> - where T: Into + pub fn get_server_endpoints_from_url( + &self, + server_url: T, + ) -> Result, StatusCode> + where + T: Into, { let server_url = server_url.into(); if !is_opc_ua_binary_url(&server_url) { @@ -380,7 +457,12 @@ impl Client { user_identity_token: IdentityToken::Anonymous, preferred_locales, }; - let mut session = Session::new(self.application_description(), self.certificate_store.clone(), session_info, self.session_retry_policy.clone()); + let mut session = Session::new( + self.application_description(), + self.certificate_store.clone(), + session_info, + self.session_retry_policy.clone(), + ); session.connect()?; let result = session.get_endpoints()?; session.disconnect(); @@ -393,8 +475,12 @@ impl Client { /// /// [`ApplicationDescription`]: ../../opcua_types/service_types/application_description/struct.ApplicationDescription.html /// - pub fn find_servers(&mut self, discovery_endpoint_url: T) -> Result, StatusCode> - where T: Into + pub fn find_servers( + &mut self, + discovery_endpoint_url: T, + ) -> Result, StatusCode> + where + T: Into, { let discovery_endpoint_url = discovery_endpoint_url.into(); 
debug!("find_servers, {}", discovery_endpoint_url); @@ -406,21 +492,31 @@ impl Client { let connected = session.connect(); if connected.is_ok() { // Find me some some servers - let result = session.find_servers(discovery_endpoint_url.clone()) + let result = session + .find_servers(discovery_endpoint_url.clone()) .map_err(|err| { - error!("Cannot find servers on discovery server {} - check this error - {:?}", discovery_endpoint_url, err); + error!( + "Cannot find servers on discovery server {} - check this error - {:?}", + discovery_endpoint_url, err + ); err }); session.disconnect(); result } else { let result = connected.unwrap_err(); - error!("Cannot connect to {} - check this error - {}", discovery_endpoint_url, result); + error!( + "Cannot connect to {} - check this error - {}", + discovery_endpoint_url, result + ); Err(result) } } else { let result = StatusCode::BadUnexpectedError; - error!("Cannot create a sesion to {} - check if url is malformed", discovery_endpoint_url); + error!( + "Cannot create a sesion to {} - check if url is malformed", + discovery_endpoint_url + ); Err(result) } } @@ -438,12 +534,20 @@ impl Client { /// /// For example the standard OPC foundation discovery server will drop the server's cert in a /// `rejected/` folder on the filesystem and this cert has to be moved to a `trusted/certs/` folder. - pub fn register_server(&mut self, discovery_endpoint_url: T, - server: RegisteredServer) -> Result<(), StatusCode> - where T: Into { + pub fn register_server( + &mut self, + discovery_endpoint_url: T, + server: RegisteredServer, + ) -> Result<(), StatusCode> + where + T: Into, + { let discovery_endpoint_url = discovery_endpoint_url.into(); if !is_valid_opc_ua_url(&discovery_endpoint_url) { - error!("Discovery endpoint url \"{}\" is not a valid OPC UA url", discovery_endpoint_url); + error!( + "Discovery endpoint url \"{}\" is not a valid OPC UA url", + discovery_endpoint_url + ); Err(StatusCode::BadTcpEndpointUrlInvalid) } else { // Get a list of endpoints from the discovery server @@ -453,10 +557,15 @@ impl Client { Err(StatusCode::BadUnexpectedError) } else { // Now choose the strongest endpoint to register through - if let Some(endpoint) = endpoints.iter() + if let Some(endpoint) = endpoints + .iter() .filter(|e| self.is_supported_endpoint(*e)) - .max_by(|a, b| a.security_level.cmp(&b.security_level)) { - debug!("Registering this server via discovery endpoint {:?}", endpoint); + .max_by(|a, b| a.security_level.cmp(&b.security_level)) + { + debug!( + "Registering this server via discovery endpoint {:?}", + endpoint + ); let session = self.new_session_from_info(endpoint.clone()); if let Ok(session) = session { let mut session = trace_write_lock_unwrap!(session); @@ -468,12 +577,18 @@ impl Client { result } Err(result) => { - error!("Cannot connect to {} - check this error - {}", discovery_endpoint_url, result); + error!( + "Cannot connect to {} - check this error - {}", + discovery_endpoint_url, result + ); Err(result) } } } else { - error!("Cannot create a sesion to {} - check if url is malformed", discovery_endpoint_url); + error!( + "Cannot create a sesion to {} - check if url is malformed", + discovery_endpoint_url + ); Err(StatusCode::BadUnexpectedError) } } else { @@ -506,39 +621,51 @@ impl Client { /// assert!(Client::find_server_endpoint(&endpoints, "opc.tcp://foo:123", SecurityPolicy::None, MessageSecurityMode::Sign).is_none()); /// ``` /// - pub fn find_server_endpoint(endpoints: &[EndpointDescription], endpoint_url: T, - security_policy: SecurityPolicy, - 
security_mode: MessageSecurityMode) -> Option - where T: Into { + pub fn find_server_endpoint( + endpoints: &[EndpointDescription], + endpoint_url: T, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + ) -> Option + where + T: Into, + { // Iterate the supplied endpoints looking for the closest match. let security_policy_uri = security_policy.to_uri(); let endpoint_url = endpoint_url.into(); // Do an exact match first - let result = endpoints.iter().find(|e| { - e.security_mode == security_mode && - e.security_policy_uri.as_ref() == security_policy_uri && - url_matches(e.endpoint_url.as_ref(), &endpoint_url) - }).cloned(); + let result = endpoints + .iter() + .find(|e| { + e.security_mode == security_mode + && e.security_policy_uri.as_ref() == security_policy_uri + && url_matches(e.endpoint_url.as_ref(), &endpoint_url) + }) + .cloned(); // If something was found, return it, otherwise try a fuzzier match that ignores the hostname. if result.is_some() { result } else { - endpoints.iter().find(|e| { - e.security_mode == security_mode && - e.security_policy_uri.as_ref() == security_policy_uri && - url_matches_except_host(e.endpoint_url.as_ref(), &endpoint_url) - }).cloned() + endpoints + .iter() + .find(|e| { + e.security_mode == security_mode + && e.security_policy_uri.as_ref() == security_policy_uri + && url_matches_except_host(e.endpoint_url.as_ref(), &endpoint_url) + }) + .cloned() } } /// Determine if we recognize the security of this endpoint fn is_supported_endpoint(&self, endpoint: &EndpointDescription) -> bool { - if let Ok(security_policy) = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) { + if let Ok(security_policy) = SecurityPolicy::from_str(endpoint.security_policy_uri.as_ref()) + { match security_policy { SecurityPolicy::Unknown => false, - _ => true + _ => true, } } else { false @@ -547,16 +674,25 @@ impl Client { /// Returns an identity token corresponding to the matching user in the configuration. Or None /// if there is no matching token. 
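// Usage sketch (not part of the diff): the three identity kinds that the token
// lookup above can produce, built directly for illustration. The credentials
// and file paths are placeholders; the crate path is assumed.
use std::path::PathBuf;

use opcua_client::prelude::*;

fn example_identity_tokens() -> Vec<IdentityToken> {
    vec![
        // Matches the reserved anonymous user token id.
        IdentityToken::Anonymous,
        // Matches a configured `ClientUserToken` carrying a password.
        IdentityToken::UserName("user".into(), "pass".into()),
        // Matches a configured `ClientUserToken` carrying certificate/key paths.
        IdentityToken::X509(
            PathBuf::from("./pki/user_cert.der"),
            PathBuf::from("./pki/user_key.pem"),
        ),
    ]
}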
- fn client_identity_token(&self, user_token_id: T) -> Option where T: Into { + fn client_identity_token(&self, user_token_id: T) -> Option + where + T: Into, + { let user_token_id = user_token_id.into(); if user_token_id == ANONYMOUS_USER_TOKEN_ID { Some(IdentityToken::Anonymous) } else if let Some(token) = self.config.user_tokens.get(&user_token_id) { if let Some(ref password) = token.password { - Some(IdentityToken::UserName(token.user.clone(), password.clone())) + Some(IdentityToken::UserName( + token.user.clone(), + password.clone(), + )) } else if let Some(ref cert_path) = token.cert_path { if let Some(ref private_key_path) = token.private_key_path { - Some(IdentityToken::X509(PathBuf::from(cert_path), PathBuf::from(private_key_path))) + Some(IdentityToken::X509( + PathBuf::from(cert_path), + PathBuf::from(private_key_path), + )) } else { None } @@ -569,26 +705,33 @@ impl Client { } /// Find an endpoint supplied from the list of endpoints that matches the input criteria - pub fn find_matching_endpoint(endpoints: &[EndpointDescription], - endpoint_url: &str, security_policy: SecurityPolicy, - security_mode: MessageSecurityMode) -> Option - { + pub fn find_matching_endpoint( + endpoints: &[EndpointDescription], + endpoint_url: &str, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + ) -> Option { if security_policy == SecurityPolicy::Unknown { panic!("Cannot match against unknown security policy"); } - let matching_endpoint = endpoints.iter().find(|e| { - // Endpoint matches if the security mode, policy and url match - security_mode == e.security_mode && - security_policy == SecurityPolicy::from_uri(e.security_policy_uri.as_ref()) && - url_matches_except_host(endpoint_url, e.endpoint_url.as_ref()) - }).cloned(); + let matching_endpoint = endpoints + .iter() + .find(|e| { + // Endpoint matches if the security mode, policy and url match + security_mode == e.security_mode + && security_policy == SecurityPolicy::from_uri(e.security_policy_uri.as_ref()) + && url_matches_except_host(endpoint_url, e.endpoint_url.as_ref()) + }) + .cloned(); // Issue #16, #17 - the server may advertise an endpoint whose hostname is inaccessible // to the client so substitute the advertised hostname with the one the client supplied. if let Some(mut matching_endpoint) = matching_endpoint { if let Ok(hostname) = hostname_from_url(endpoint_url) { - if let Ok(new_endpoint_url) = url_with_replaced_hostname(matching_endpoint.endpoint_url.as_ref(), &hostname) { + if let Ok(new_endpoint_url) = + url_with_replaced_hostname(matching_endpoint.endpoint_url.as_ref(), &hostname) + { matching_endpoint.endpoint_url = new_endpoint_url.into(); Some(matching_endpoint) } else { @@ -603,18 +746,32 @@ impl Client { } /// Creates a [`SessionInfo`](SessionInfo) information from the supplied client endpoint. 
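// Usage sketch (not part of the diff): the discovery-and-match flow the helpers
// above take part in — fetch the server's endpoints, pick one by security
// policy and mode, then build and activate a session from it. The URL is
// illustrative, error mapping is simplified, and `connect_and_activate()` is
// assumed to return `Result<(), StatusCode>`.
use opcua_client::prelude::*;

fn connect_anonymously(client: &mut Client) -> Result<(), StatusCode> {
    let endpoints = client.get_server_endpoints()?;
    let endpoint = Client::find_server_endpoint(
        &endpoints,
        "opc.tcp://localhost:4855/",
        SecurityPolicy::None,
        MessageSecurityMode::None,
    )
    .ok_or(StatusCode::BadTcpEndpointUrlInvalid)?;
    let session = client
        .new_session_from_info((endpoint, IdentityToken::Anonymous))
        .map_err(|_| StatusCode::BadUnexpectedError)?;
    let mut session = session.write().unwrap();
    session.connect_and_activate()
}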
- fn session_info_for_endpoint(&self, client_endpoint: &ClientEndpoint, endpoints: &[EndpointDescription]) -> Result { + fn session_info_for_endpoint( + &self, + client_endpoint: &ClientEndpoint, + endpoints: &[EndpointDescription], + ) -> Result { // Enumerate endpoints looking for matching one if let Ok(security_policy) = SecurityPolicy::from_str(&client_endpoint.security_policy) { let security_mode = MessageSecurityMode::from(client_endpoint.security_mode.as_ref()); if security_mode != MessageSecurityMode::Invalid { let endpoint_url = client_endpoint.url.clone(); // Now find a matching endpoint from those on the server - let endpoint = Self::find_matching_endpoint(endpoints, &endpoint_url, security_policy, security_mode); + let endpoint = Self::find_matching_endpoint( + endpoints, + &endpoint_url, + security_policy, + security_mode, + ); if endpoint.is_none() { Err(format!("Endpoint {}, {:?} / {:?} does not match against any supplied by the server", endpoint_url, security_policy, security_mode)) - } else if let Some(user_identity_token) = self.client_identity_token(client_endpoint.user_token_id.clone()) { - info!("Creating a session for endpoint {}, {:?} / {:?}", endpoint_url, security_policy, security_mode); + } else if let Some(user_identity_token) = + self.client_identity_token(client_endpoint.user_token_id.clone()) + { + info!( + "Creating a session for endpoint {}, {:?} / {:?}", + endpoint_url, security_policy, security_mode + ); let preferred_locales = self.config.preferred_locales.clone(); Ok(SessionInfo { endpoint: endpoint.unwrap().clone(), @@ -622,13 +779,22 @@ impl Client { preferred_locales, }) } else { - Err(format!("Endpoint {} user id cannot be found", client_endpoint.user_token_id)) + Err(format!( + "Endpoint {} user id cannot be found", + client_endpoint.user_token_id + )) } } else { - Err(format!("Endpoint {} security mode {} is invalid", client_endpoint.url, client_endpoint.security_mode)) + Err(format!( + "Endpoint {} security mode {} is invalid", + client_endpoint.url, client_endpoint.security_mode + )) } } else { - Err(format!("Endpoint {} security policy {} is invalid", client_endpoint.url, client_endpoint.security_policy)) + Err(format!( + "Endpoint {} security policy {} is invalid", + client_endpoint.url, client_endpoint.security_policy + )) } } } diff --git a/client/src/comms/tcp_transport.rs b/client/src/comms/tcp_transport.rs index 249279bed..409593cd9 100644 --- a/client/src/comms/tcp_transport.rs +++ b/client/src/comms/tcp_transport.rs @@ -13,14 +13,14 @@ use std::sync::{Arc, Mutex, RwLock}; use std::thread; use std::time::{Duration, Instant}; -use futures::{Future, Stream}; use futures::future::{self}; use futures::sync::mpsc::{UnboundedReceiver, UnboundedSender}; +use futures::{Future, Stream}; use tokio; use tokio::net::TcpStream; use tokio_codec::FramedRead; -use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::io::{self, ReadHalf, WriteHalf}; +use tokio_io::{AsyncRead, AsyncWrite}; use tokio_timer::Interval; use opcua_core::{ @@ -34,9 +34,7 @@ use opcua_core::{ prelude::*, RUNTIME, }; -use opcua_types::{ - status_code::StatusCode, -}; +use opcua_types::status_code::StatusCode; use crate::{ callbacks::OnSessionClosed, @@ -45,8 +43,16 @@ use crate::{ session_state::{ConnectionState, SessionState}, }; -macro_rules! connection_state {( $s:expr ) => { *trace_read_lock_unwrap!($s) } } -macro_rules! set_connection_state {( $s:expr, $v:expr ) => { *trace_write_lock_unwrap!($s) = $v } } +macro_rules! 
connection_state { + ( $s:expr ) => { + *trace_read_lock_unwrap!($s) + }; +} +macro_rules! set_connection_state { + ( $s:expr, $v:expr ) => { + *trace_write_lock_unwrap!($s) = $v + }; +} struct ReadState { pub state: Arc>, @@ -63,19 +69,32 @@ impl Drop for ReadState { } impl ReadState { - fn turn_received_chunks_into_message(&mut self, chunks: &[MessageChunk]) -> Result { + fn turn_received_chunks_into_message( + &mut self, + chunks: &[MessageChunk], + ) -> Result { // Validate that all chunks have incrementing sequence numbers and valid chunk types let secure_channel = trace_read_lock_unwrap!(self.secure_channel); - self.last_received_sequence_number = Chunker::validate_chunks(self.last_received_sequence_number + 1, &secure_channel, chunks)?; + self.last_received_sequence_number = Chunker::validate_chunks( + self.last_received_sequence_number + 1, + &secure_channel, + chunks, + )?; // Now decode Chunker::decode(&chunks, &secure_channel, None) } - fn process_chunk(&mut self, chunk: MessageChunk) -> Result, StatusCode> { + fn process_chunk( + &mut self, + chunk: MessageChunk, + ) -> Result, StatusCode> { // trace!("Got a chunk {:?}", chunk); let (chunk, decoding_limits) = { let mut secure_channel = trace_write_lock_unwrap!(self.secure_channel); - (secure_channel.verify_and_remove_security(&chunk.data)?, secure_channel.decoding_limits()) + ( + secure_channel.verify_and_remove_security(&chunk.data)?, + secure_channel.decoding_limits(), + ) }; let message_header = chunk.message_header(&decoding_limits)?; match message_header.is_final { @@ -161,7 +180,11 @@ impl TcpTransport { const WAIT_POLLING_TIMEOUT: u64 = 100; /// Create a new TCP transport layer for the session - pub fn new(secure_channel: Arc>, session_state: Arc>, message_queue: Arc>) -> TcpTransport { + pub fn new( + secure_channel: Arc>, + session_state: Arc>, + message_queue: Arc>, + ) -> TcpTransport { let connection_state = { let session_state = trace_read_lock_unwrap!(session_state); session_state.connection_state() @@ -180,7 +203,8 @@ impl TcpTransport { panic!("Should not try to connect when already connected"); } - let (host, port) = hostname_port_from_url(&endpoint_url, constants::DEFAULT_OPC_UA_SERVER_PORT)?; + let (host, port) = + hostname_port_from_url(&endpoint_url, constants::DEFAULT_OPC_UA_SERVER_PORT)?; // Resolve the host name into a socket address let addr = { @@ -195,7 +219,11 @@ impl TcpTransport { return Err(StatusCode::BadTcpEndpointUrlInvalid); } } else { - error!("Invalid address {}, cannot be parsed {:?}", addr, addrs.unwrap_err()); + error!( + "Invalid address {}, cannot be parsed {:?}", + addr, + addrs.unwrap_err() + ); return Err(StatusCode::BadTcpEndpointUrlInvalid); } }; @@ -205,8 +233,14 @@ impl TcpTransport { // has also terminated. 
{ - let connection_task = Self::connection_task(addr, self.connection_state.clone(), endpoint_url.to_string(), - self.session_state.clone(), self.secure_channel.clone(), self.message_queue.clone()); + let connection_task = Self::connection_task( + addr, + self.connection_state.clone(), + endpoint_url.to_string(), + self.session_state.clone(), + self.secure_channel.clone(), + self.message_queue.clone(), + ); let connection_state = self.connection_state.clone(); let session_state = self.session_state.clone(); @@ -227,7 +261,10 @@ impl TcpTransport { session_state.on_session_closed(status_code); } connection_state => { - error!("Connect task is not in a finished state, state = {:?}", connection_state); + error!( + "Connect task is not in a finished state, state = {:?}", + connection_state + ); } } deregister_runtime_component!(thread_id); @@ -272,25 +309,38 @@ impl TcpTransport { /// Tests if the transport is connected pub fn is_connected(&self) -> bool { match connection_state!(self.connection_state) { - ConnectionState::NotStarted | ConnectionState::Connecting | - ConnectionState::Finished(_) => false, + ConnectionState::NotStarted + | ConnectionState::Connecting + | ConnectionState::Finished(_) => false, _ => true, } } /// This is the main connection task for a connection. - fn connection_task(addr: SocketAddr, connection_state: Arc>, endpoint_url: String, session_state: Arc>, secure_channel: Arc>, message_queue: Arc>) -> impl Future { - debug!("Creating a connection task to connect to {} with url {}", addr, endpoint_url); + fn connection_task( + addr: SocketAddr, + connection_state: Arc>, + endpoint_url: String, + session_state: Arc>, + secure_channel: Arc>, + message_queue: Arc>, + ) -> impl Future { + debug!( + "Creating a connection task to connect to {} with url {}", + addr, endpoint_url + ); let connection_state_for_error = connection_state.clone(); let connection_state_for_error2 = connection_state.clone(); let hello = { let session_state = trace_read_lock_unwrap!(session_state); - HelloMessage::new(&endpoint_url, - session_state.send_buffer_size(), - session_state.receive_buffer_size(), - session_state.max_message_size()) + HelloMessage::new( + &endpoint_url, + session_state.send_buffer_size(), + session_state.receive_buffer_size(), + session_state.max_message_size(), + ) }; let id = { @@ -302,29 +352,49 @@ impl TcpTransport { register_runtime_component!(connection_task_id.clone()); set_connection_state!(connection_state, ConnectionState::Connecting); - TcpStream::connect(&addr).map_err(move |err| { - error!("Could not connect to host {}, {:?}", addr, err); - set_connection_state!(connection_state_for_error, ConnectionState::Finished(StatusCode::BadCommunicationError)); - }).and_then(move |socket| { - set_connection_state!(connection_state, ConnectionState::Connected); - let (reader, writer) = WrappedTcpStream(socket).split(); - Ok((connection_state, reader, writer)) - }).and_then(move |(connection_state, reader, writer)| { - debug! 
{"Sending HELLO"}; - io::write_all(writer, hello.encode_to_vec()).map_err(move |err| { - error!("Cannot send hello to server, err = {:?}", err); - set_connection_state!(connection_state_for_error2, ConnectionState::Finished(StatusCode::BadCommunicationError)); - }).map(move |(writer, _)| { - (reader, writer) - }).and_then(move |(reader, writer)| { - Self::spawn_looping_tasks(reader, writer, connection_state, session_state, secure_channel, message_queue); - deregister_runtime_component!(connection_task_id.clone()); - Ok(()) + TcpStream::connect(&addr) + .map_err(move |err| { + error!("Could not connect to host {}, {:?}", addr, err); + set_connection_state!( + connection_state_for_error, + ConnectionState::Finished(StatusCode::BadCommunicationError) + ); + }) + .and_then(move |socket| { + set_connection_state!(connection_state, ConnectionState::Connected); + let (reader, writer) = WrappedTcpStream(socket).split(); + Ok((connection_state, reader, writer)) + }) + .and_then(move |(connection_state, reader, writer)| { + debug! {"Sending HELLO"}; + io::write_all(writer, hello.encode_to_vec()) + .map_err(move |err| { + error!("Cannot send hello to server, err = {:?}", err); + set_connection_state!( + connection_state_for_error2, + ConnectionState::Finished(StatusCode::BadCommunicationError) + ); + }) + .map(move |(writer, _)| (reader, writer)) + .and_then(move |(reader, writer)| { + Self::spawn_looping_tasks( + reader, + writer, + connection_state, + session_state, + secure_channel, + message_queue, + ); + deregister_runtime_component!(connection_task_id.clone()); + Ok(()) + }) }) - }) } - fn write_bytes_task(connection: Arc>, and_close_connection: bool) -> impl Future { + fn write_bytes_task( + connection: Arc>, + and_close_connection: bool, + ) -> impl Future { let (bytes_to_write, writer) = { let mut connection = trace_lock_unwrap!(connection); let bytes_to_write = connection.send_buffer.bytes_to_write(); @@ -333,31 +403,41 @@ impl TcpTransport { }; let connection_for_and_then = connection.clone(); - io::write_all(writer, bytes_to_write).map_err(move |err| { - error!("Write bytes task IO error {:?}", err); - }).map(move |(writer, _)| { - trace!("Write bytes task finished"); - // Reinstate writer - let mut connection = trace_lock_unwrap!(connection); - connection.writer = Some(writer); - }).map_err(|_| { - error!("Write bytes task error"); - }).and_then(move |_| { - // Connection might be closed now - if and_close_connection { - debug!("Write bytes task received a close, so closing connection after this send"); - let mut connection = trace_lock_unwrap!(connection_for_and_then); - let _ = connection.writer.as_mut().unwrap().shutdown(); - connection.writer = None; - Err(()) - } else { - trace!("Write bytes task was not told to close connection"); - Ok(()) - } - }) + io::write_all(writer, bytes_to_write) + .map_err(move |err| { + error!("Write bytes task IO error {:?}", err); + }) + .map(move |(writer, _)| { + trace!("Write bytes task finished"); + // Reinstate writer + let mut connection = trace_lock_unwrap!(connection); + connection.writer = Some(writer); + }) + .map_err(|_| { + error!("Write bytes task error"); + }) + .and_then(move |_| { + // Connection might be closed now + if and_close_connection { + debug!( + "Write bytes task received a close, so closing connection after this send" + ); + let mut connection = trace_lock_unwrap!(connection_for_and_then); + let _ = connection.writer.as_mut().unwrap().shutdown(); + connection.writer = None; + Err(()) + } else { + trace!("Write bytes task was 
not told to close connection"); + Ok(()) + } + }) } - fn spawn_finished_monitor_task(state: Arc>, finished_flag: Arc>, id: u32) { + fn spawn_finished_monitor_task( + state: Arc>, + finished_flag: Arc>, + id: u32, + ) { // This task just spins around waiting for the connection to become finished. When it // does it, sets a flag. @@ -378,7 +458,9 @@ impl TcpTransport { if finished { // Set the flag let mut finished_flag = trace_write_lock_unwrap!(finished_flag); - debug!("finished monitor task detects finished state and has set a finished flag"); + debug!( + "finished monitor task detects finished state and has set a finished flag" + ); *finished_flag = true; } future::ok(!finished) @@ -395,7 +477,14 @@ impl TcpTransport { tokio::spawn(finished_monitor_task); } - fn spawn_reading_task(reader: ReadHalf, writer_tx: UnboundedSender, finished_flag: Arc>, _receive_buffer_size: usize, connection: ReadState, id: u32) { + fn spawn_reading_task( + reader: ReadHalf, + writer_tx: UnboundedSender, + finished_flag: Arc>, + _receive_buffer_size: usize, + connection: ReadState, + id: u32, + ) { // This is the main processing loop that receives and sends messages let decoding_limits = { let secure_channel = trace_read_lock_unwrap!(connection.secure_channel); @@ -412,91 +501,116 @@ impl TcpTransport { // The reader reads frames from the codec, which are messages let framed_reader = FramedRead::new(reader, TcpCodec::new(finished_flag, decoding_limits)); - let looping_task = framed_reader.for_each(move |message| { - let mut connection = trace_write_lock_unwrap!(connection); - let mut session_status_code = StatusCode::Good; - match message { - Message::Acknowledge(ack) => { - debug!("Reader got ack {:?}", ack); - if connection_state!(connection.state) != ConnectionState::WaitingForAck { - error!("Reader got an unexpected ACK"); - session_status_code = StatusCode::BadUnexpectedError; - } else { - // TODO revise our sizes and other things according to the ACK - set_connection_state!(connection.state, ConnectionState::Processing); + let looping_task = framed_reader + .for_each(move |message| { + let mut connection = trace_write_lock_unwrap!(connection); + let mut session_status_code = StatusCode::Good; + match message { + Message::Acknowledge(ack) => { + debug!("Reader got ack {:?}", ack); + if connection_state!(connection.state) != ConnectionState::WaitingForAck { + error!("Reader got an unexpected ACK"); + session_status_code = StatusCode::BadUnexpectedError; + } else { + // TODO revise our sizes and other things according to the ACK + set_connection_state!(connection.state, ConnectionState::Processing); + } } - } - Message::Chunk(chunk) => { - if connection_state!(connection.state) != ConnectionState::Processing { - error!("Got an unexpected message chunk"); - session_status_code = StatusCode::BadUnexpectedError; - } else { - match connection.process_chunk(chunk) { - Ok(response) => if let Some(response) = response { - // Store the response - let mut message_queue = trace_write_lock_unwrap!(connection.message_queue); - message_queue.store_response(response); - } - Err(err) => session_status_code = err - }; + Message::Chunk(chunk) => { + if connection_state!(connection.state) != ConnectionState::Processing { + error!("Got an unexpected message chunk"); + session_status_code = StatusCode::BadUnexpectedError; + } else { + match connection.process_chunk(chunk) { + Ok(response) => { + if let Some(response) = response { + // Store the response + let mut message_queue = + 
trace_write_lock_unwrap!(connection.message_queue); + message_queue.store_response(response); + } + } + Err(err) => session_status_code = err, + }; + } + } + Message::Error(error) => { + // TODO client should go into an error recovery state, dropping the connection and reestablishing it. + session_status_code = + if let Some(status_code) = StatusCode::from_u32(error.error) { + status_code + } else { + StatusCode::BadUnexpectedError + }; + error!( + "Expecting a chunk, got an error message {}", + session_status_code + ); + } + _ => { + panic!("Expected a recognized message"); } } - Message::Error(error) => { - // TODO client should go into an error recovery state, dropping the connection and reestablishing it. - session_status_code = if let Some(status_code) = StatusCode::from_u32(error.error) { - status_code - } else { - StatusCode::BadUnexpectedError - }; - error!("Expecting a chunk, got an error message {}", session_status_code); - } - _ => { - panic!("Expected a recognized message"); + if session_status_code.is_bad() { + error!( + "Reader is putting connection into a finished state with status {}", + session_status_code + ); + set_connection_state!( + connection.state, + ConnectionState::Finished(session_status_code) + ); + // Tell the writer to quit + debug!("Reader is sending a quit to the writer"); + if let Err(err) = writer_tx.unbounded_send(message_queue::Message::Quit) { + debug!("Cannot send quit to writer, error = {:?}", err); + } + Err(std::io::ErrorKind::ConnectionReset.into()) + } else { + Ok(()) } - } - if session_status_code.is_bad() { - error!("Reader is putting connection into a finished state with status {}", session_status_code); - set_connection_state!(connection.state, ConnectionState::Finished(session_status_code)); - // Tell the writer to quit - debug!("Reader is sending a quit to the writer"); - if let Err(err) = writer_tx.unbounded_send(message_queue::Message::Quit) { - debug!("Cannot send quit to writer, error = {:?}", err); + }) + .map_err(move |e| { + error!("Read loop error {:?}", e); + let connection = trace_read_lock_unwrap!(connection_for_error); + let state = connection_state!(connection.state); + match state { + ConnectionState::Finished(_) => { /* DO NOTHING */ } + _ => { + set_connection_state!( + connection.state, + ConnectionState::Finished(StatusCode::BadCommunicationError) + ); + } } - Err(std::io::ErrorKind::ConnectionReset.into()) - } else { - Ok(()) - } - }).map_err(move |e| { - error!("Read loop error {:?}", e); - let connection = trace_read_lock_unwrap!(connection_for_error); - let state = connection_state!(connection.state); - match state { - ConnectionState::Finished(_) => { /* DO NOTHING */ } - _ => { - set_connection_state!(connection.state, ConnectionState::Finished(StatusCode::BadCommunicationError)); + }) + .and_then(move |_| { + let connection = trace_read_lock_unwrap!(connection_for_terminate); + let state = connection_state!(connection.state); + if let ConnectionState::Finished(_) = state { + debug!("Read loop is terminating due to finished state"); + Err(()) + } else { + // Read / write messages + Ok(()) } - } - }).and_then(move |_| { - let connection = trace_read_lock_unwrap!(connection_for_terminate); - let state = connection_state!(connection.state); - if let ConnectionState::Finished(_) = state { - debug!("Read loop is terminating due to finished state"); - Err(()) - } else { - // Read / write messages - Ok(()) - } - }).map(move |_| { - debug!("Read loop finished"); - deregister_runtime_component!(read_task_id); - }).map_err(move 
|_| { - debug!("Read loop ended with an error"); - deregister_runtime_component!(read_task_id_for_err); - }); + }) + .map(move |_| { + debug!("Read loop finished"); + deregister_runtime_component!(read_task_id); + }) + .map_err(move |_| { + debug!("Read loop ended with an error"); + deregister_runtime_component!(read_task_id_for_err); + }); tokio::spawn(looping_task); } - fn spawn_writing_task(receiver: UnboundedReceiver, connection: WriteState, id: u32) { + fn spawn_writing_task( + receiver: UnboundedReceiver, + connection: WriteState, + id: u32, + ) { let connection = Arc::new(Mutex::new(connection)); let connection_for_error = connection.clone(); @@ -588,10 +702,21 @@ impl TcpTransport { /// This is the main processing loop for the connection. It writes requests and reads responses /// over the socket to the server. - fn spawn_looping_tasks(reader: ReadHalf, writer: WriteHalf, connection_state: Arc>, session_state: Arc>, secure_channel: Arc>, message_queue: Arc>) { + fn spawn_looping_tasks( + reader: ReadHalf, + writer: WriteHalf, + connection_state: Arc>, + session_state: Arc>, + secure_channel: Arc>, + message_queue: Arc>, + ) { let (receive_buffer_size, send_buffer_size, id) = { let session_state = trace_read_lock_unwrap!(session_state); - (session_state.receive_buffer_size(), session_state.send_buffer_size(), session_state.id()) + ( + session_state.receive_buffer_size(), + session_state.send_buffer_size(), + session_state.id(), + ) }; // Create the message receiver that will drive writes @@ -615,7 +740,14 @@ impl TcpTransport { last_received_sequence_number: 0, message_queue: message_queue.clone(), }; - Self::spawn_reading_task(reader, sender, finished_flag, receive_buffer_size, read_connection, id); + Self::spawn_reading_task( + reader, + sender, + finished_flag, + receive_buffer_size, + read_connection, + id, + ); } // Spawn the writing task loop diff --git a/client/src/comms/transport.rs b/client/src/comms/transport.rs index 120e884ca..18cbe7b35 100644 --- a/client/src/comms/transport.rs +++ b/client/src/comms/transport.rs @@ -5,4 +5,4 @@ /// A trait common to all transport implementations pub(crate) trait Transport { // Common functions will go here -} \ No newline at end of file +} diff --git a/client/src/config.rs b/client/src/config.rs index 0c920dd73..23de27335 100644 --- a/client/src/config.rs +++ b/client/src/config.rs @@ -4,12 +4,7 @@ //! Client configuration data. -use std::{ - self, - collections::BTreeMap, - path::PathBuf, - str::FromStr, -}; +use std::{self, collections::BTreeMap, path::PathBuf, str::FromStr}; use opcua_core::config::Config; use opcua_crypto::SecurityPolicy; @@ -34,7 +29,11 @@ pub struct ClientUserToken { impl ClientUserToken { /// Constructs a client token which holds a username and password. - pub fn user_pass(user: S, password: T) -> Self where S: Into, T: Into { + pub fn user_pass(user: S, password: T) -> Self + where + S: Into, + T: Into, + { ClientUserToken { user: user.into(), password: Some(password.into()), @@ -44,7 +43,10 @@ impl ClientUserToken { } /// Constructs a client token which holds a username and paths to X509 certificate and private key. - pub fn x509(user: S, cert_path: &PathBuf, private_key_path: &PathBuf) -> Self where S: Into { + pub fn x509(user: S, cert_path: &PathBuf, private_key_path: &PathBuf) -> Self + where + S: Into, + { // Apparently on Windows, a PathBuf can hold weird non-UTF chars but they will not // be stored in a config file properly in any event, so this code will lossily strip them out. 
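// Usage sketch (not part of the diff): building the two `ClientUserToken`
// flavours that config.rs validates below — password-based and X509-based.
// The user names and paths are placeholders relative to the PKI directory;
// the crate path is assumed.
use std::path::PathBuf;

use opcua_client::prelude::*;

fn example_user_tokens() -> (ClientUserToken, ClientUserToken) {
    let password_token = ClientUserToken::user_pass("sample_user", "sample_password");
    let x509_token = ClientUserToken::x509(
        "sample_x509_user",
        &PathBuf::from("./x509/user_cert.der"),
        &PathBuf::from("./x509/user_private_key.pem"),
    );
    (password_token, x509_token)
}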
ClientUserToken { @@ -66,12 +68,18 @@ impl ClientUserToken { // A token must properly represent one kind of token or it is not valid if self.password.is_some() { if self.cert_path.is_some() || self.private_key_path.is_some() { - error!("User token {} holds a password and certificate info - it cannot be both.", self.user); + error!( + "User token {} holds a password and certificate info - it cannot be both.", + self.user + ); valid = false; } } else { if self.cert_path.is_none() && self.private_key_path.is_none() { - error!("User token {} fails to provide a password or certificate info.", self.user); + error!( + "User token {} fails to provide a password or certificate info.", + self.user + ); valid = false; } else if self.cert_path.is_none() || self.private_key_path.is_none() { error!("User token {} fails to provide both a certificate path and a private key path.", self.user); @@ -98,7 +106,10 @@ pub struct ClientEndpoint { impl ClientEndpoint { /// Makes a client endpoint - pub fn new(url: T) -> Self where T: Into { + pub fn new(url: T) -> Self + where + T: Into, + { ClientEndpoint { url: url.into(), security_policy: SecurityPolicy::None.to_str().into(), @@ -168,7 +179,10 @@ impl Config for ClientConfig { valid = false; } if self.user_tokens.contains_key(ANONYMOUS_USER_TOKEN_ID) { - error!("User tokens contains the reserved \"{}\" id", ANONYMOUS_USER_TOKEN_ID); + error!( + "User tokens contains the reserved \"{}\" id", + ANONYMOUS_USER_TOKEN_ID + ); valid = false; } if self.user_tokens.contains_key("") { @@ -183,24 +197,38 @@ impl Config for ClientConfig { if self.endpoints.is_empty() { warn!("Endpoint config contains no endpoints"); } else { -// Check for invalid ids in endpoints + // Check for invalid ids in endpoints if self.endpoints.contains_key("") { error!("Endpoints contains an endpoint with an empty id"); valid = false; } - if !self.default_endpoint.is_empty() && !self.endpoints.contains_key(&self.default_endpoint) { - error!("Default endpoint id {} does not exist in list of endpoints", self.default_endpoint); + if !self.default_endpoint.is_empty() + && !self.endpoints.contains_key(&self.default_endpoint) + { + error!( + "Default endpoint id {} does not exist in list of endpoints", + self.default_endpoint + ); valid = false; } -// Check for invalid security policy and modes in endpoints + // Check for invalid security policy and modes in endpoints self.endpoints.iter().for_each(|(id, e)| { - if SecurityPolicy::from_str(&e.security_policy).unwrap() != SecurityPolicy::Unknown { - if MessageSecurityMode::Invalid == MessageSecurityMode::from(e.security_mode.as_ref()) { - error!("Endpoint {} security mode {} is invalid", id, e.security_mode); + if SecurityPolicy::from_str(&e.security_policy).unwrap() != SecurityPolicy::Unknown + { + if MessageSecurityMode::Invalid + == MessageSecurityMode::from(e.security_mode.as_ref()) + { + error!( + "Endpoint {} security mode {} is invalid", + id, e.security_mode + ); valid = false; } } else { - error!("Endpoint {} security policy {} is invalid", id, e.security_policy); + error!( + "Endpoint {} security policy {} is invalid", + id, e.security_policy + ); valid = false; } }); @@ -212,13 +240,21 @@ impl Config for ClientConfig { valid } - fn application_name(&self) -> UAString { UAString::from(&self.application_name) } + fn application_name(&self) -> UAString { + UAString::from(&self.application_name) + } - fn application_uri(&self) -> UAString { UAString::from(&self.application_uri) } + fn application_uri(&self) -> UAString { + 
UAString::from(&self.application_uri) + } - fn product_uri(&self) -> UAString { UAString::from(&self.product_uri) } + fn product_uri(&self) -> UAString { + UAString::from(&self.product_uri) + } - fn application_type(&self) -> ApplicationType { ApplicationType::Client } + fn application_type(&self) -> ApplicationType { + ApplicationType::Client + } } impl Default for ClientConfig { @@ -231,7 +267,10 @@ impl ClientConfig { /// The default PKI directory pub const PKI_DIR: &'static str = "pki"; - pub fn new(application_name: T, application_uri: T) -> Self where T: Into { + pub fn new(application_name: T, application_uri: T) -> Self + where + T: Into, + { let mut pki_dir = std::env::current_dir().unwrap(); pki_dir.push(Self::PKI_DIR); @@ -254,4 +293,3 @@ impl ClientConfig { } } } - diff --git a/client/src/lib.rs b/client/src/lib.rs index c6cb7ef53..93ff6e43a 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -101,29 +101,32 @@ extern crate opcua_core; #[macro_use] extern crate serde_derive; -use opcua_types::{response_header::ResponseHeader, status_code::StatusCode}; use opcua_core::supported_message::SupportedMessage; +use opcua_types::{response_header::ResponseHeader, status_code::StatusCode}; mod comms; +mod message_queue; +mod session_state; mod subscription; mod subscription_state; mod subscription_timer; -mod session_state; -mod message_queue; // Use through prelude -mod config; +mod builder; +mod callbacks; mod client; +mod config; mod session; -mod callbacks; -mod builder; mod session_retry; /// Process the service result, i.e. where the request "succeeded" but the response /// contains a failure status code. pub(crate) fn process_service_result(response_header: &ResponseHeader) -> Result<(), StatusCode> { if response_header.service_result.is_bad() { - info!("Received a bad service result {} from the request", response_header.service_result); + info!( + "Received a bad service result {} from the request", + response_header.service_result + ); Err(response_header.service_result) } else { Ok(()) @@ -133,7 +136,10 @@ pub(crate) fn process_service_result(response_header: &ResponseHeader) -> Result pub(crate) fn process_unexpected_response(response: SupportedMessage) -> StatusCode { match response { SupportedMessage::ServiceFault(service_fault) => { - error!("Received a service fault of {} for the request", service_fault.response_header.service_result); + error!( + "Received a service fault of {} for the request", + service_fault.response_header.service_result + ); service_fault.response_header.service_result } _ => { @@ -149,14 +155,9 @@ pub mod prelude { pub use opcua_types::{service_types::*, status_code::StatusCode}; pub use crate::{ - builder::*, - callbacks::*, - client::*, - config::*, - session::*, - subscription::MonitoredItem, + builder::*, callbacks::*, client::*, config::*, session::*, subscription::MonitoredItem, }; } #[cfg(test)] -mod tests; \ No newline at end of file +mod tests; diff --git a/client/src/message_queue.rs b/client/src/message_queue.rs index 1035f60e2..2554ced9b 100644 --- a/client/src/message_queue.rs +++ b/client/src/message_queue.rs @@ -39,7 +39,9 @@ impl MessageQueue { } // Creates the transmission queue that outgoing requests will be sent over - pub(crate) fn make_request_channel(&mut self) -> (UnboundedSender, UnboundedReceiver) { + pub(crate) fn make_request_channel( + &mut self, + ) -> (UnboundedSender, UnboundedReceiver) { let (tx, rx) = mpsc::unbounded::(); self.sender = Some(tx.clone()); (tx, rx) @@ -71,7 +73,10 @@ impl MessageQueue { /// Called 
when a session's request times out. This call allows the session state to remove /// the request as pending and ignore any response that arrives for it. pub(crate) fn request_has_timed_out(&mut self, request_handle: u32) { - info!("Request {} has timed out and any response will be ignored", request_handle); + info!( + "Request {} has timed out and any response will be ignored", + request_handle + ); let _ = self.inflight_requests.remove(&(request_handle, false)); let _ = self.inflight_requests.remove(&(request_handle, true)); } @@ -96,7 +101,9 @@ impl MessageQueue { /// returns them to the caller. pub(crate) fn async_responses(&mut self) -> Vec { // Gather up all request handles - let mut async_handles = self.responses.iter() + let mut async_handles = self + .responses + .iter() .filter(|(_, v)| v.1) .map(|(k, _)| *k) .collect::>(); @@ -105,7 +112,8 @@ impl MessageQueue { async_handles.sort(); // Remove each item from the map and return to caller - async_handles.iter() + async_handles + .iter() .map(|k| self.responses.remove(k).unwrap().0) .collect() } @@ -114,4 +122,4 @@ impl MessageQueue { pub(crate) fn take_response(&mut self, request_handle: u32) -> Option { self.responses.remove(&request_handle).map(|v| v.0) } -} \ No newline at end of file +} diff --git a/client/src/session.rs b/client/src/session.rs index 4877b2d45..62342fc53 100644 --- a/client/src/session.rs +++ b/client/src/session.rs @@ -8,16 +8,17 @@ //! The session also has async functionality but that is reserved for publish requests on subscriptions //! and events. use std::{ - cmp, collections::HashSet, convert::TryFrom, result::Result, str::FromStr, sync::{Arc, mpsc, Mutex, RwLock}, + cmp, + collections::HashSet, + convert::TryFrom, + result::Result, + str::FromStr, + sync::{mpsc, Arc, Mutex, RwLock}, thread, time::{Duration, Instant}, }; -use futures::{ - future, Future, - stream::Stream, - sync::mpsc::UnboundedSender, -}; +use futures::{future, stream::Stream, sync::mpsc::UnboundedSender, Future}; use tokio; use tokio_timer::Interval; @@ -28,11 +29,14 @@ use opcua_core::{ }, supported_message::SupportedMessage, }; -use opcua_crypto::{self as crypto, CertificateStore, SecurityPolicy, user_identity::make_user_name_identity_token, X509}; +use opcua_crypto::{ + self as crypto, user_identity::make_user_name_identity_token, CertificateStore, SecurityPolicy, + X509, +}; use opcua_types::{ - *, node_ids::{MethodId, ObjectId}, status_code::StatusCode, + *, }; use crate::{ @@ -102,7 +106,7 @@ impl Into for (EndpointDescription, client::IdentityToken) { /// A `Session` runs in a loop, which can be terminated by sending it a `SessionCommand`. pub enum SessionCommand { /// Stop running as soon as possible - Stop + Stop, } /// A session of the client. The session is associated with an endpoint and maintains a state @@ -157,16 +161,35 @@ impl Session { /// /// * `Session` - the interface that shall be used to communicate between the client and the server. 
/// - pub(crate) fn new(application_description: ApplicationDescription, certificate_store: Arc>, session_info: SessionInfo, session_retry_policy: SessionRetryPolicy) -> Session { + pub(crate) fn new( + application_description: ApplicationDescription, + certificate_store: Arc>, + session_info: SessionInfo, + session_retry_policy: SessionRetryPolicy, + ) -> Session { // TODO take these from the client config let decoding_limits = DecodingLimits::default(); - let secure_channel = Arc::new(RwLock::new(SecureChannel::new(certificate_store.clone(), Role::Client, decoding_limits))); + let secure_channel = Arc::new(RwLock::new(SecureChannel::new( + certificate_store.clone(), + Role::Client, + decoding_limits, + ))); let message_queue = Arc::new(RwLock::new(MessageQueue::new())); - let session_state = Arc::new(RwLock::new(SessionState::new(secure_channel.clone(), message_queue.clone()))); - let transport = TcpTransport::new(secure_channel.clone(), session_state.clone(), message_queue.clone()); + let session_state = Arc::new(RwLock::new(SessionState::new( + secure_channel.clone(), + message_queue.clone(), + ))); + let transport = TcpTransport::new( + secure_channel.clone(), + session_state.clone(), + message_queue.clone(), + ); let subscription_state = Arc::new(RwLock::new(SubscriptionState::new())); - let timer_command_queue = SubscriptionTimer::make_timer_command_queue(session_state.clone(), subscription_state.clone()); + let timer_command_queue = SubscriptionTimer::make_timer_command_queue( + session_state.clone(), + subscription_state.clone(), + ); Session { application_description, session_info, @@ -215,7 +238,10 @@ impl Session { /// /// * `session_closed_callback` - the session closed callback /// - pub fn set_session_closed_callback(&mut self, session_closed_callback: CB) where CB: OnSessionClosed + Send + Sync + 'static { + pub fn set_session_closed_callback(&mut self, session_closed_callback: CB) + where + CB: OnSessionClosed + Send + Sync + 'static, + { let mut session_state = trace_write_lock_unwrap!(self.session_state); session_state.set_session_closed_callback(session_closed_callback); } @@ -227,7 +253,10 @@ impl Session { /// /// * `connection_status_callback` - the connection status callback. /// - pub fn set_connection_status_callback(&mut self, connection_status_callback: CB) where CB: OnConnectionStatusChange + Send + Sync + 'static { + pub fn set_connection_status_callback(&mut self, connection_status_callback: CB) + where + CB: OnConnectionStatusChange + Send + Sync + 'static, + { let mut session_state = trace_write_lock_unwrap!(self.session_state); session_state.set_connection_status_callback(connection_status_callback); } @@ -247,7 +276,10 @@ impl Session { pub fn reconnect_and_activate(&mut self) -> Result<(), StatusCode> { // Do nothing if already connected / activated if self.is_connected() { - session_error!(self, "Reconnect is going to do nothing because already connected"); + session_error!( + self, + "Reconnect is going to do nothing because already connected" + ); Err(StatusCode::BadUnexpectedError) } else { // Clear the existing secure channel state @@ -305,7 +337,10 @@ impl Session { if let Some(subscription_ids) = subscription_ids { // Try to use TransferSubscriptions to move subscriptions_ids over. If this // works then there is nothing else to do. 
- let mut subscription_ids_to_recreate = subscription_ids.iter().map(|s| *s).collect::>(); + let mut subscription_ids_to_recreate = subscription_ids + .iter() + .map(|s| *s) + .collect::>(); if let Ok(transfer_results) = self.transfer_subscriptions(&subscription_ids, true) { session_debug!(self, "transfer_results = {:?}", transfer_results); transfer_results.iter().enumerate().for_each(|(i, r)| { @@ -322,58 +357,80 @@ impl Session { } // Now create any subscriptions that could not be transferred - subscription_ids_to_recreate.iter().for_each(|subscription_id| { - info!("Recreating subscription {}", subscription_id); - // Remove the subscription data, create it again from scratch - let deleted_subscription = { - let mut subscription_state = trace_write_lock_unwrap!(subscription_state); - subscription_state.delete_subscription(*subscription_id) - }; + subscription_ids_to_recreate + .iter() + .for_each(|subscription_id| { + info!("Recreating subscription {}", subscription_id); + // Remove the subscription data, create it again from scratch + let deleted_subscription = { + let mut subscription_state = trace_write_lock_unwrap!(subscription_state); + subscription_state.delete_subscription(*subscription_id) + }; - if let Some(subscription) = deleted_subscription { - // Attempt to replicate the subscription (subscription id will be new) - if let Ok(subscription_id) = self.create_subscription_inner( - subscription.publishing_interval(), - subscription.lifetime_count(), - subscription.max_keep_alive_count(), - subscription.max_notifications_per_publish(), - subscription.priority(), - subscription.publishing_enabled(), - subscription.notification_callback()) { - info!("New subscription created with id {}", subscription_id); - - // For each monitored item - let items_to_create = subscription.monitored_items().iter().map(|(_, item)| { - MonitoredItemCreateRequest { - item_to_monitor: item.item_to_monitor().clone(), - monitoring_mode: item.monitoring_mode(), - requested_parameters: MonitoringParameters { - client_handle: item.client_handle(), - sampling_interval: item.sampling_interval(), - filter: ExtensionObject::null(), - queue_size: item.queue_size(), - discard_oldest: true, - }, - } - }).collect::>(); - let _ = self.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create); - - // Recreate any triggers for the monitored item. This code assumes monitored item - // ids are the same value as they were in the previous subscription. 
- subscription.monitored_items().iter().for_each(|(_, item)| { - let triggered_items = item.triggered_items(); - if !triggered_items.is_empty() { - let links_to_add = triggered_items.iter().map(|i| *i).collect::>(); - let _ = self.set_triggering(subscription_id, item.id(), links_to_add.as_slice(), &[]); - } - }); + if let Some(subscription) = deleted_subscription { + // Attempt to replicate the subscription (subscription id will be new) + if let Ok(subscription_id) = self.create_subscription_inner( + subscription.publishing_interval(), + subscription.lifetime_count(), + subscription.max_keep_alive_count(), + subscription.max_notifications_per_publish(), + subscription.priority(), + subscription.publishing_enabled(), + subscription.notification_callback(), + ) { + info!("New subscription created with id {}", subscription_id); + + // For each monitored item + let items_to_create = subscription + .monitored_items() + .iter() + .map(|(_, item)| MonitoredItemCreateRequest { + item_to_monitor: item.item_to_monitor().clone(), + monitoring_mode: item.monitoring_mode(), + requested_parameters: MonitoringParameters { + client_handle: item.client_handle(), + sampling_interval: item.sampling_interval(), + filter: ExtensionObject::null(), + queue_size: item.queue_size(), + discard_oldest: true, + }, + }) + .collect::>(); + let _ = self.create_monitored_items( + subscription_id, + TimestampsToReturn::Both, + &items_to_create, + ); + + // Recreate any triggers for the monitored item. This code assumes monitored item + // ids are the same value as they were in the previous subscription. + subscription.monitored_items().iter().for_each(|(_, item)| { + let triggered_items = item.triggered_items(); + if !triggered_items.is_empty() { + let links_to_add = + triggered_items.iter().map(|i| *i).collect::>(); + let _ = self.set_triggering( + subscription_id, + item.id(), + links_to_add.as_slice(), + &[], + ); + } + }); + } else { + session_warn!( + self, + "Could not create a subscription from the existing subscription {}", + subscription_id + ); + } } else { - session_warn!(self, "Could not create a subscription from the existing subscription {}", subscription_id); + panic!( + "Subscription {}, doesn't exist although it should", + subscription_id + ); } - } else { - panic!("Subscription {}, doesn't exist although it should", subscription_id); - } - }); + }); // Now all the subscriptions should have been recreated, it should be possible // to kick off the publish timers. 
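For context, the subscription transfer and recreation above runs inside the session when the caller asks it to reconnect. A minimal sketch of that calling side, assuming `session` is the `Arc<RwLock<Session>>` handed out by the client and that `is_connected()` and `reconnect_and_activate()` are public, as their use in this diff suggests (error handling elided):

    use opcua_client::prelude::*;
    use std::sync::{Arc, RwLock};

    fn try_reconnect(session: &Arc<RwLock<Session>>) {
        let mut session = session.write().unwrap();
        if !session.is_connected() {
            // Re-opens the secure channel, reactivates the session and triggers the
            // subscription transfer / recreation logic shown above.
            if let Err(status) = session.reconnect_and_activate() {
                eprintln!("reconnect failed: {}", status);
            }
        }
    }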
@@ -382,7 +439,9 @@ impl Session { subscription_state.subscription_ids().unwrap() }; for subscription_id in &subscription_ids { - let _ = self.timer_command_queue.unbounded_send(SubscriptionTimerCommand::CreateTimer(*subscription_id)); + let _ = self + .timer_command_queue + .unbounded_send(SubscriptionTimerCommand::CreateTimer(*subscription_id)); } } Ok(()) @@ -401,7 +460,12 @@ impl Session { } Err(status_code) => { self.session_retry_policy.increment_retry_count(); - session_warn!(self, "Connect was unsuccessful, error = {}, retries = {}", status_code, self.session_retry_policy.retry_count()); + session_warn!( + self, + "Connect was unsuccessful, error = {}, retries = {}", + status_code, + self.session_retry_policy.retry_count() + ); use chrono::Utc; match self.session_retry_policy.should_retry_connect(Utc::now()) { @@ -436,9 +500,15 @@ impl Session { pub fn connect_no_retry(&mut self) -> Result<(), StatusCode> { let endpoint_url = self.session_info.endpoint.endpoint_url.clone(); info!("Connect"); - let security_policy = SecurityPolicy::from_str(self.session_info.endpoint.security_policy_uri.as_ref()).unwrap(); + let security_policy = + SecurityPolicy::from_str(self.session_info.endpoint.security_policy_uri.as_ref()) + .unwrap(); if security_policy == SecurityPolicy::Unknown { - session_error!(self, "connect, security policy \"{}\" is unknown", self.session_info.endpoint.security_policy_uri.as_ref()); + session_error!( + self, + "connect, security policy \"{}\" is unknown", + self.session_info.endpoint.security_policy_uri.as_ref() + ); Err(StatusCode::BadSecurityPolicyRejected) } else { let (cert, key) = { @@ -452,9 +522,14 @@ impl Session { secure_channel.set_cert(cert); secure_channel.set_security_policy(security_policy); secure_channel.set_security_mode(self.session_info.endpoint.security_mode); - let _ = secure_channel.set_remote_cert_from_byte_string(&self.session_info.endpoint.server_certificate); + let _ = secure_channel.set_remote_cert_from_byte_string( + &self.session_info.endpoint.server_certificate, + ); info!("Security policy = {:?}", security_policy); - info!("Security mode = {:?}", self.session_info.endpoint.security_mode); + info!( + "Security mode = {:?}", + self.session_info.endpoint.security_mode + ); } self.transport.connect(endpoint_url.as_ref())?; self.open_secure_channel()?; @@ -543,15 +618,17 @@ impl Session { /// pub fn run_async(session: Arc>) -> mpsc::Sender { let (tx, rx) = mpsc::channel(); - thread::spawn(move || { - Self::run_loop(session, Self::POLL_SLEEP_INTERVAL, rx) - }); + thread::spawn(move || Self::run_loop(session, Self::POLL_SLEEP_INTERVAL, rx)); tx } /// The main running loop for a session. This is used by `run()` and `run_async()` to run /// continuously until a signal is received to terminate. 
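`run_async()` above returns the sending half of a `SessionCommand` channel, so the caller can stop the run loop from another thread. A sketch, with the work between start and stop purely illustrative:

    // `session` is the Arc<RwLock<Session>> created by the client.
    let stop_tx = Session::run_async(session.clone());
    // ... create subscriptions, read, write ...
    // Ask the run loop to exit as soon as possible.
    let _ = stop_tx.send(SessionCommand::Stop);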
- fn run_loop(session: Arc>, sleep_interval: u64, rx: mpsc::Receiver) { + fn run_loop( + session: Arc>, + sleep_interval: u64, + rx: mpsc::Receiver, + ) { loop { if let Ok(command) = rx.try_recv() { // Received a command @@ -605,7 +682,11 @@ impl Session { use chrono::Utc; match self.session_retry_policy.should_retry_connect(Utc::now()) { Answer::GiveUp => { - session_error!(self, "Session has given up trying to reconnect to the server after {} retries", self.session_retry_policy.retry_count()); + session_error!( + self, + "Session has given up trying to reconnect to the server after {} retries", + self.session_retry_policy.retry_count() + ); return Err(()); } Answer::Retry => { @@ -616,7 +697,11 @@ impl Session { self.session_retry_policy.reset_retry_count(); } else { self.session_retry_policy.increment_retry_count(); - session_warn!(self, "Reconnect was unsuccessful, retries = {}", self.session_retry_policy.retry_count()); + session_warn!( + self, + "Reconnect was unsuccessful, retries = {}", + self.session_retry_policy.retry_count() + ); } true } @@ -650,7 +735,13 @@ impl Session { /// [`FindServersRequest`]: ./struct.FindServersRequest.html /// [`ApplicationDescription`]: ./struct.ApplicationDescription.html /// - pub fn find_servers(&mut self, endpoint_url: T) -> Result, StatusCode> where T: Into { + pub fn find_servers( + &mut self, + endpoint_url: T, + ) -> Result, StatusCode> + where + T: Into, + { let request = FindServersRequest { request_header: self.make_request_header(), endpoint_url: endpoint_url.into(), @@ -853,8 +944,10 @@ impl Session { session_state.set_authentication_token(response.authentication_token.clone()); { let mut secure_channel = trace_write_lock_unwrap!(self.secure_channel); - let _ = secure_channel.set_remote_nonce_from_byte_string(&response.server_nonce); - let _ = secure_channel.set_remote_cert_from_byte_string(&response.server_certificate); + let _ = + secure_channel.set_remote_nonce_from_byte_string(&response.server_nonce); + let _ = secure_channel + .set_remote_cert_from_byte_string(&response.server_certificate); } session_state.session_id() }; @@ -864,13 +957,23 @@ impl Session { // The server certificate is validated if the policy requires it let security_policy = self.security_policy(); let cert_status_code = if security_policy != SecurityPolicy::None { - if let Ok(server_certificate) = crypto::X509::from_byte_string(&response.server_certificate) { + if let Ok(server_certificate) = + crypto::X509::from_byte_string(&response.server_certificate) + { // Validate server certificate against hostname and application_uri - let hostname = hostname_from_url(self.session_info.endpoint.endpoint_url.as_ref()).map_err(|_| StatusCode::BadUnexpectedError)?; - let application_uri = self.session_info.endpoint.server.application_uri.as_ref(); + let hostname = + hostname_from_url(self.session_info.endpoint.endpoint_url.as_ref()) + .map_err(|_| StatusCode::BadUnexpectedError)?; + let application_uri = + self.session_info.endpoint.server.application_uri.as_ref(); let certificate_store = trace_write_lock_unwrap!(self.certificate_store); - let result = certificate_store.validate_or_reject_application_instance_cert(&server_certificate, security_policy, Some(&hostname), Some(application_uri)); + let result = certificate_store.validate_or_reject_application_instance_cert( + &server_certificate, + security_policy, + Some(&hostname), + Some(application_uri), + ); if result.is_bad() { result } else { @@ -890,7 +993,11 @@ impl Session { } else { // Spawn a task to ping the server 
to keep the connection alive before the session // timeout period. - session_debug!(self, "Revised session timeout is {}", response.revised_session_timeout); + session_debug!( + self, + "Revised session timeout is {}", + response.revised_session_timeout + ); self.spawn_session_activity_task(response.revised_session_timeout); // TODO Verify signature using server's public key (from endpoint) comparing with data made from client certificate and nonce. @@ -924,7 +1031,12 @@ impl Session { // Session activity will happen every 3/4 of the timeout period const MIN_SESSION_ACTIVITY_MS: u64 = 1000; let session_activity = cmp::max((session_timeout as u64 / 4) * 3, MIN_SESSION_ACTIVITY_MS); - session_debug!(self, "session timeout is {}, activity timer is {}", session_timeout, session_activity); + session_debug!( + self, + "session timeout is {}, activity timer is {}", + session_timeout, + session_activity + ); let last_timeout = Arc::new(Mutex::new(Instant::now())); @@ -1007,7 +1119,12 @@ impl Session { None } else { // Ids are - let locale_ids = self.session_info.preferred_locales.iter().map(|id| UAString::from(id)).collect(); + let locale_ids = self + .session_info + .preferred_locales + .iter() + .map(|id| UAString::from(id)) + .collect(); Some(locale_ids) }; @@ -1024,24 +1141,38 @@ impl Session { certificate_store.read_own_cert_and_pkey_optional() }; - // Create a signature data // let session_state = self.session_state.lock().unwrap(); if client_pkey.is_none() { session_error!(self, "Cannot create client signature - no pkey!"); return Err(StatusCode::BadUnexpectedError); } else if server_cert.is_none() { - session_error!(self, "Cannot sign server certificate because server cert is null"); + session_error!( + self, + "Cannot sign server certificate because server cert is null" + ); return Err(StatusCode::BadUnexpectedError); } else if server_nonce.is_empty() { - session_error!(self, "Cannot sign server certificate because server nonce is empty"); + session_error!( + self, + "Cannot sign server certificate because server nonce is empty" + ); return Err(StatusCode::BadUnexpectedError); } - let server_cert = secure_channel.remote_cert().as_ref().unwrap().as_byte_string(); + let server_cert = secure_channel + .remote_cert() + .as_ref() + .unwrap() + .as_byte_string(); let server_nonce = ByteString::from(secure_channel.remote_nonce()); let signing_key = client_pkey.as_ref().unwrap(); - crypto::create_signature_data(signing_key, security_policy, &server_cert, &server_nonce)? + crypto::create_signature_data( + signing_key, + security_policy, + &server_cert, + &server_nonce, + )? 
} }; @@ -1118,7 +1249,10 @@ impl Session { /// [`AddNodesItem`]: ./struct.AddNodesItem.html /// [`AddNodesResult`]: ./struct.AddNodesResult.html /// - pub fn add_nodes(&mut self, nodes_to_add: &[AddNodesItem]) -> Result, StatusCode> { + pub fn add_nodes( + &mut self, + nodes_to_add: &[AddNodesItem], + ) -> Result, StatusCode> { if nodes_to_add.is_empty() { session_error!(self, "add_nodes, called with no nodes to add"); Err(StatusCode::BadNothingToDo) @@ -1152,7 +1286,10 @@ impl Session { /// [`AddReferencesRequest`]: ./struct.AddReferencesRequest.html /// [`AddReferencesItem`]: ./struct.AddReferencesItem.html /// - pub fn add_references(&mut self, references_to_add: &[AddReferencesItem]) -> Result, StatusCode> { + pub fn add_references( + &mut self, + references_to_add: &[AddReferencesItem], + ) -> Result, StatusCode> { if references_to_add.is_empty() { session_error!(self, "add_references, called with no references to add"); Err(StatusCode::BadNothingToDo) @@ -1186,7 +1323,10 @@ impl Session { /// [`DeleteNodesRequest`]: ./struct.DeleteNodesRequest.html /// [`DeleteNodesItem`]: ./struct.DeleteNodesItem.html /// - pub fn delete_nodes(&mut self, nodes_to_delete: &[DeleteNodesItem]) -> Result, StatusCode> { + pub fn delete_nodes( + &mut self, + nodes_to_delete: &[DeleteNodesItem], + ) -> Result, StatusCode> { if nodes_to_delete.is_empty() { session_error!(self, "delete_nodes, called with no nodes to delete"); Err(StatusCode::BadNothingToDo) @@ -1220,9 +1360,15 @@ impl Session { /// [`DeleteReferencesRequest`]: ./struct.DeleteReferencesRequest.html /// [`DeleteReferencesItem`]: ./struct.DeleteReferencesItem.html /// - pub fn delete_references(&mut self, references_to_delete: &[DeleteReferencesItem]) -> Result, StatusCode> { + pub fn delete_references( + &mut self, + references_to_delete: &[DeleteReferencesItem], + ) -> Result, StatusCode> { if references_to_delete.is_empty() { - session_error!(self, "delete_references, called with no references to delete"); + session_error!( + self, + "delete_references, called with no references to delete" + ); Err(StatusCode::BadNothingToDo) } else { let request = DeleteReferencesRequest { @@ -1260,7 +1406,10 @@ impl Session { /// [`BrowseDescription`]: ./struct.BrowseDescription.html /// [`BrowseResult`]: ./struct.BrowseResult.html /// - pub fn browse(&mut self, nodes_to_browse: &[BrowseDescription]) -> Result>, StatusCode> { + pub fn browse( + &mut self, + nodes_to_browse: &[BrowseDescription], + ) -> Result>, StatusCode> { if nodes_to_browse.is_empty() { session_error!(self, "browse, was not supplied with any nodes to browse"); Err(StatusCode::BadNothingToDo) @@ -1307,9 +1456,16 @@ impl Session { /// [`BrowseNextRequest`]: ./struct.BrowseNextRequest.html /// [`BrowseResult`]: ./struct.BrowseResult.html /// - pub fn browse_next(&mut self, release_continuation_points: bool, continuation_points: &[ByteString]) -> Result>, StatusCode> { + pub fn browse_next( + &mut self, + release_continuation_points: bool, + continuation_points: &[ByteString], + ) -> Result>, StatusCode> { if continuation_points.is_empty() { - session_error!(self, "browse_next, was not supplied with any continuation points"); + session_error!( + self, + "browse_next, was not supplied with any continuation points" + ); Err(StatusCode::BadNothingToDo) } else { let request = BrowseNextRequest { @@ -1347,9 +1503,15 @@ impl Session { /// /// [`RegisterNodesRequest`]: ./struct.RegisterNodesRequest.html /// [`NodeId`]: ./struct.NodeId.html - pub fn register_nodes(&mut self, nodes_to_register: 
&[NodeId]) -> Result, StatusCode> { + pub fn register_nodes( + &mut self, + nodes_to_register: &[NodeId], + ) -> Result, StatusCode> { if nodes_to_register.is_empty() { - session_error!(self, "register_nodes, was not supplied with any nodes to register"); + session_error!( + self, + "register_nodes, was not supplied with any nodes to register" + ); Err(StatusCode::BadNothingToDo) } else { let request = RegisterNodesRequest { @@ -1388,7 +1550,10 @@ impl Session { /// pub fn unregister_nodes(&mut self, nodes_to_unregister: &[NodeId]) -> Result<(), StatusCode> { if nodes_to_unregister.is_empty() { - session_error!(self, "unregister_nodes, was not supplied with any nodes to unregister"); + session_error!( + self, + "unregister_nodes, was not supplied with any nodes to unregister" + ); Err(StatusCode::BadNothingToDo) } else { let request = UnregisterNodesRequest { @@ -1480,16 +1645,28 @@ impl Session { /// * `Ok(Vec)` - A list of `HistoryReadResult` results corresponding to history read operation. /// * `Err(StatusCode)` - Status code reason for failure. /// - pub fn history_read(&mut self, history_read_details: ExtensionObject, timestamps_to_return: TimestampsToReturn, release_continuation_points: bool, nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + pub fn history_read( + &mut self, + history_read_details: ExtensionObject, + timestamps_to_return: TimestampsToReturn, + release_continuation_points: bool, + nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { // Validate the read operation - let valid_operation = Self::node_id_is_one_of(&history_read_details.node_id, &[ - ObjectId::ReadEventDetails_Encoding_DefaultBinary, - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, - ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, - ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, - ]); + let valid_operation = Self::node_id_is_one_of( + &history_read_details.node_id, + &[ + ObjectId::ReadEventDetails_Encoding_DefaultBinary, + ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, + ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, + ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, + ], + ); if !valid_operation { - session_error!(self, "history_read(), was called with an invalid history update operation"); + session_error!( + self, + "history_read(), was called with an invalid history update operation" + ); Err(StatusCode::BadHistoryOperationUnsupported) } else { let request = HistoryReadRequest { @@ -1503,7 +1680,11 @@ impl Session { Some(nodes_to_read.to_vec()) }, }; - session_debug!(self, "history_read() requested to read nodes {:?}", nodes_to_read); + session_debug!( + self, + "history_read() requested to read nodes {:?}", + nodes_to_read + ); let response = self.send_request(request)?; if let SupportedMessage::HistoryReadResponse(response) = response { session_debug!(self, "history_read(), success"); @@ -1538,7 +1719,10 @@ impl Session { /// [`WriteRequest`]: ./struct.WriteRequest.html /// [`WriteValue`]: ./struct.WriteValue.html /// - pub fn write(&mut self, nodes_to_write: &[WriteValue]) -> Result>, StatusCode> { + pub fn write( + &mut self, + nodes_to_write: &[WriteValue], + ) -> Result>, StatusCode> { if nodes_to_write.is_empty() { // No subscriptions session_error!(self, "write() was not supplied with any nodes to write"); @@ -1581,24 +1765,36 @@ impl Session { /// * `Ok(Vec)` - A list of `HistoryUpdateResult` results corresponding to history update operation. /// * `Err(StatusCode)` - Status code reason for failure. 
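The address space services reformatted above (`browse`, `browse_next`, `write`, `register_nodes`) all take a slice of request structures and return per-item results. A `browse` sketch against the standard Objects folder; the node ids are the usual OPC UA well-known ids and the masks are assumptions, not values taken from this diff:

    // Inside a fn returning Result<(), StatusCode>, with a connected `session`.
    let browse_description = BrowseDescription {
        node_id: ObjectId::ObjectsFolder.into(),
        browse_direction: BrowseDirection::Forward,
        reference_type_id: ReferenceTypeId::Organizes.into(),
        include_subtypes: true,
        node_class_mask: 0,     // all node classes
        result_mask: 0b11_1111, // all result fields
    };
    if let Some(results) = session.browse(&[browse_description])? {
        for result in &results {
            let count = result.references.as_ref().map_or(0, |r| r.len());
            println!("browse returned {} references", count);
        }
    }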
/// - pub fn history_update(&mut self, history_update_details: &[ExtensionObject]) -> Result, StatusCode> { + pub fn history_update( + &mut self, + history_update_details: &[ExtensionObject], + ) -> Result, StatusCode> { if history_update_details.is_empty() { // No subscriptions - session_error!(self, "history_update(), was not supplied with any detail to update"); + session_error!( + self, + "history_update(), was not supplied with any detail to update" + ); Err(StatusCode::BadNothingToDo) } else { let valid_operation = !history_update_details.iter().any(|h| { - !Self::node_id_is_one_of(&h.node_id, &[ - ObjectId::UpdateDataDetails_Encoding_DefaultBinary, - ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, - ObjectId::UpdateEventDetails_Encoding_DefaultBinary, - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, - ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, - ObjectId::DeleteEventDetails_Encoding_DefaultBinary - ]) + !Self::node_id_is_one_of( + &h.node_id, + &[ + ObjectId::UpdateDataDetails_Encoding_DefaultBinary, + ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, + ObjectId::UpdateEventDetails_Encoding_DefaultBinary, + ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, + ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, + ObjectId::DeleteEventDetails_Encoding_DefaultBinary, + ], + ) }); if !valid_operation { - session_error!(self, "history_update(), was called with an invalid history update operation"); + session_error!( + self, + "history_update(), was called with an invalid history update operation" + ); Err(StatusCode::BadHistoryOperationUnsupported) } else { let request = HistoryUpdateRequest { @@ -1646,7 +1842,10 @@ impl Session { /// [`CallMethodRequest`]: ./struct.CallMethodRequest.html /// [`CallMethodResult`]: ./struct.CallMethodResult.html /// - pub fn call(&mut self, method: T) -> Result where T: Into { + pub fn call(&mut self, method: T) -> Result + where + T: Into, + { session_debug!(self, "call()"); let methods_to_call = Some(vec![method.into()]); let request = CallRequest { @@ -1657,13 +1856,20 @@ impl Session { if let SupportedMessage::CallResponse(response) = response { if let Some(mut results) = response.results { if results.len() != 1 { - session_error!(self, "call(), expecting a result from the call to the server, got {} results", results.len()); + session_error!( + self, + "call(), expecting a result from the call to the server, got {} results", + results.len() + ); Err(StatusCode::BadUnexpectedError) } else { Ok(results.remove(0)) } } else { - session_error!(self, "call(), expecting a result from the call to the server, got nothing"); + session_error!( + self, + "call(), expecting a result from the call to the server, got nothing" + ); Err(StatusCode::BadUnexpectedError) } } else { @@ -1682,7 +1888,10 @@ impl Session { /// * `Ok((Vec, Vec))` - Result for call, consisting a list of (monitored_item_id, client_handle) /// * `Err(StatusCode)` - Status code reason for failure. 
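`call()` above accepts anything convertible into a `CallMethodRequest` and expects exactly one `CallMethodResult` back. A sketch with a hypothetical server method; the object and method node ids and the input argument are invented for illustration:

    // Inside a fn returning Result<(), StatusCode>, with a connected `session`.
    let request = CallMethodRequest {
        object_id: NodeId::new(2, "Device"),
        method_id: NodeId::new(2, "Device.Reset"),
        input_arguments: Some(vec![Variant::from(true)]),
    };
    let result = session.call(request)?;
    println!("method call returned status {}", result.status_code);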
/// - pub fn call_get_monitored_items(&mut self, subscription_id: u32) -> Result<(Vec, Vec), StatusCode> { + pub fn call_get_monitored_items( + &mut self, + subscription_id: u32, + ) -> Result<(Vec, Vec), StatusCode> { let args = Some(vec![Variant::from(subscription_id)]); let object_id: NodeId = ObjectId::Server.into(); let method_id: NodeId = MethodId::Server_GetMonitoredItems.into(); @@ -1690,8 +1899,10 @@ impl Session { let response = self.call(request)?; if let Some(mut result) = response.output_arguments { if result.len() == 2 { - let server_handles = >::try_from(&result.remove(0)).map_err(|_| StatusCode::BadUnexpectedError)?; - let client_handles = >::try_from(&result.remove(0)).map_err(|_| StatusCode::BadUnexpectedError)?; + let server_handles = >::try_from(&result.remove(0)) + .map_err(|_| StatusCode::BadUnexpectedError)?; + let client_handles = >::try_from(&result.remove(0)) + .map_err(|_| StatusCode::BadUnexpectedError)?; Ok((server_handles, client_handles)) } else { session_error!(self, "Expected a result with 2 args and didn't get it."); @@ -1727,16 +1938,33 @@ impl Session { /// [`MonitoredItemCreateRequest`]: ./struct.MonitoredItemCreateRequest.html /// [`MonitoredItemCreateResult`]: ./struct.MonitoredItemCreateResult.html /// - pub fn create_monitored_items(&mut self, subscription_id: u32, timestamps_to_return: TimestampsToReturn, items_to_create: &[MonitoredItemCreateRequest]) -> Result, StatusCode> { - session_debug!(self, "create_monitored_items, for subscription {}, {} items", subscription_id, items_to_create.len()); + pub fn create_monitored_items( + &mut self, + subscription_id: u32, + timestamps_to_return: TimestampsToReturn, + items_to_create: &[MonitoredItemCreateRequest], + ) -> Result, StatusCode> { + session_debug!( + self, + "create_monitored_items, for subscription {}, {} items", + subscription_id, + items_to_create.len() + ); if subscription_id == 0 { session_error!(self, "create_monitored_items, subscription id 0 is invalid"); Err(StatusCode::BadInvalidArgument) } else if !self.subscription_exists(subscription_id) { - session_error!(self, "create_monitored_items, subscription id {} does not exist", subscription_id); + session_error!( + self, + "create_monitored_items, subscription id {} does not exist", + subscription_id + ); Err(StatusCode::BadInvalidArgument) } else if items_to_create.is_empty() { - session_error!(self, "create_monitored_items, called with no items to create"); + session_error!( + self, + "create_monitored_items, called with no items to create" + ); Err(StatusCode::BadNothingToDo) } else { // Assign each item a unique client handle @@ -1746,7 +1974,8 @@ impl Session { items_to_create.iter_mut().for_each(|i| { //if user doesn't specify a valid client_handle if i.requested_parameters.client_handle == 0 { - i.requested_parameters.client_handle = session_state.next_monitored_item_handle(); + i.requested_parameters.client_handle = + session_state.next_monitored_item_handle(); } }); } @@ -1761,28 +1990,36 @@ impl Session { if let SupportedMessage::CreateMonitoredItemsResponse(response) = response { crate::process_service_result(&response.response_header)?; if let Some(ref results) = response.results { - session_debug!(self, "create_monitored_items, {} items created", items_to_create.len()); + session_debug!( + self, + "create_monitored_items, {} items created", + items_to_create.len() + ); // Set the items in our internal state - let items_to_create = items_to_create.iter() + let items_to_create = items_to_create + .iter() .zip(results) - 
.map(|(i, r)| { - subscription::CreateMonitoredItem { - id: r.monitored_item_id, - client_handle: i.requested_parameters.client_handle, - discard_oldest: i.requested_parameters.discard_oldest, - item_to_monitor: i.item_to_monitor.clone(), - monitoring_mode: i.monitoring_mode, - queue_size: r.revised_queue_size, - sampling_interval: r.revised_sampling_interval, - } + .map(|(i, r)| subscription::CreateMonitoredItem { + id: r.monitored_item_id, + client_handle: i.requested_parameters.client_handle, + discard_oldest: i.requested_parameters.discard_oldest, + item_to_monitor: i.item_to_monitor.clone(), + monitoring_mode: i.monitoring_mode, + queue_size: r.revised_queue_size, + sampling_interval: r.revised_sampling_interval, }) .collect::>(); { - let mut subscription_state = trace_write_lock_unwrap!(self.subscription_state); - subscription_state.insert_monitored_items(subscription_id, &items_to_create); + let mut subscription_state = + trace_write_lock_unwrap!(self.subscription_state); + subscription_state + .insert_monitored_items(subscription_id, &items_to_create); } } else { - session_debug!(self, "create_monitored_items, success but no monitored items were created"); + session_debug!( + self, + "create_monitored_items, success but no monitored items were created" + ); } Ok(response.results.unwrap()) } else { @@ -1812,19 +2049,37 @@ impl Session { /// [`MonitoredItemModifyRequest`]: ./struct.MonitoredItemModifyRequest.html /// [`MonitoredItemModifyResult`]: ./struct.MonitoredItemModifyResult.html /// - pub fn modify_monitored_items(&mut self, subscription_id: u32, timestamps_to_return: TimestampsToReturn, items_to_modify: &[MonitoredItemModifyRequest]) -> Result, StatusCode> { - session_debug!(self, "modify_monitored_items, for subscription {}, {} items", subscription_id, items_to_modify.len()); + pub fn modify_monitored_items( + &mut self, + subscription_id: u32, + timestamps_to_return: TimestampsToReturn, + items_to_modify: &[MonitoredItemModifyRequest], + ) -> Result, StatusCode> { + session_debug!( + self, + "modify_monitored_items, for subscription {}, {} items", + subscription_id, + items_to_modify.len() + ); if subscription_id == 0 { session_error!(self, "modify_monitored_items, subscription id 0 is invalid"); Err(StatusCode::BadInvalidArgument) } else if !self.subscription_exists(subscription_id) { - session_error!(self, "modify_monitored_items, subscription id {} does not exist", subscription_id); + session_error!( + self, + "modify_monitored_items, subscription id {} does not exist", + subscription_id + ); Err(StatusCode::BadInvalidArgument) } else if items_to_modify.is_empty() { - session_error!(self, "modify_monitored_items, called with no items to modify"); + session_error!( + self, + "modify_monitored_items, called with no items to modify" + ); Err(StatusCode::BadNothingToDo) } else { - let monitored_item_ids = items_to_modify.iter() + let monitored_item_ids = items_to_modify + .iter() .map(|i| i.monitored_item_id) .collect::>(); let request = ModifyMonitoredItemsRequest { @@ -1838,19 +2093,20 @@ impl Session { crate::process_service_result(&response.response_header)?; if let Some(ref results) = response.results { // Set the items in our internal state - let items_to_modify = monitored_item_ids.iter() + let items_to_modify = monitored_item_ids + .iter() .zip(results.iter()) - .map(|(id, r)| { - subscription::ModifyMonitoredItem { - id: *id, - queue_size: r.revised_queue_size, - sampling_interval: r.revised_sampling_interval, - } + .map(|(id, r)| subscription::ModifyMonitoredItem 
{ + id: *id, + queue_size: r.revised_queue_size, + sampling_interval: r.revised_sampling_interval, }) .collect::>(); { - let mut subscription_state = trace_write_lock_unwrap!(self.subscription_state); - subscription_state.modify_monitored_items(subscription_id, &items_to_modify); + let mut subscription_state = + trace_write_lock_unwrap!(self.subscription_state); + subscription_state + .modify_monitored_items(subscription_id, &items_to_modify); } } session_debug!(self, "modify_monitored_items, success"); @@ -1880,7 +2136,12 @@ impl Session { /// /// [`SetMonitoringModeRequest`]: ./struct.SetMonitoringModeRequest.html /// - pub fn set_monitoring_mode(&mut self, subscription_id: u32, monitoring_mode: MonitoringMode, monitored_item_ids: &[u32]) -> Result, StatusCode> { + pub fn set_monitoring_mode( + &mut self, + subscription_id: u32, + monitoring_mode: MonitoringMode, + monitored_item_ids: &[u32], + ) -> Result, StatusCode> { if monitored_item_ids.is_empty() { session_error!(self, "set_monitoring_mode, called with nothing to do"); Err(StatusCode::BadNothingToDo) @@ -1924,14 +2185,28 @@ impl Session { /// /// [`SetTriggeringRequest`]: ./struct.SetTriggeringRequest.html /// - pub fn set_triggering(&mut self, subscription_id: u32, triggering_item_id: u32, links_to_add: &[u32], links_to_remove: &[u32]) -> Result<(Option>, Option>), StatusCode> { + pub fn set_triggering( + &mut self, + subscription_id: u32, + triggering_item_id: u32, + links_to_add: &[u32], + links_to_remove: &[u32], + ) -> Result<(Option>, Option>), StatusCode> { if links_to_add.is_empty() && links_to_remove.is_empty() { session_error!(self, "set_triggering, called with nothing to add or remove"); Err(StatusCode::BadNothingToDo) } else { let request = { - let links_to_add = if links_to_add.is_empty() { None } else { Some(links_to_add.to_vec()) }; - let links_to_remove = if links_to_remove.is_empty() { None } else { Some(links_to_remove.to_vec()) }; + let links_to_add = if links_to_add.is_empty() { + None + } else { + Some(links_to_add.to_vec()) + }; + let links_to_remove = if links_to_remove.is_empty() { + None + } else { + Some(links_to_remove.to_vec()) + }; SetTriggeringRequest { request_header: self.make_request_header(), subscription_id, @@ -1944,7 +2219,12 @@ impl Session { if let SupportedMessage::SetTriggeringResponse(response) = response { // Update client side state let mut subscription_state = trace_write_lock_unwrap!(self.subscription_state); - subscription_state.set_triggering(subscription_id, triggering_item_id, links_to_add, links_to_remove); + subscription_state.set_triggering( + subscription_id, + triggering_item_id, + links_to_add, + links_to_remove, + ); Ok((response.add_results, response.remove_results)) } else { session_error!(self, "set_triggering failed {:?}", response); @@ -1970,16 +2250,32 @@ impl Session { /// /// [`DeleteMonitoredItemsRequest`]: ./struct.DeleteMonitoredItemsRequest.html /// - pub fn delete_monitored_items(&mut self, subscription_id: u32, items_to_delete: &[u32]) -> Result, StatusCode> { - session_debug!(self, "delete_monitored_items, subscription {} for {} items", subscription_id, items_to_delete.len()); + pub fn delete_monitored_items( + &mut self, + subscription_id: u32, + items_to_delete: &[u32], + ) -> Result, StatusCode> { + session_debug!( + self, + "delete_monitored_items, subscription {} for {} items", + subscription_id, + items_to_delete.len() + ); if subscription_id == 0 { session_error!(self, "delete_monitored_items, subscription id 0 is invalid"); 
Err(StatusCode::BadInvalidArgument) } else if !self.subscription_exists(subscription_id) { - session_error!(self, "delete_monitored_items, subscription id {} does not exist", subscription_id); + session_error!( + self, + "delete_monitored_items, subscription id {} does not exist", + subscription_id + ); Err(StatusCode::BadInvalidArgument) } else if items_to_delete.is_empty() { - session_error!(self, "delete_monitored_items, called with no items to delete"); + session_error!( + self, + "delete_monitored_items, called with no items to delete" + ); Err(StatusCode::BadNothingToDo) } else { let request = DeleteMonitoredItemsRequest { @@ -2050,18 +2346,41 @@ impl Session { /// /// [`CreateSubscriptionRequest`]: ./struct.CreateSubscriptionRequest.html /// - pub fn create_subscription(&mut self, publishing_interval: f64, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, priority: u8, publishing_enabled: bool, callback: CB) - -> Result - where CB: OnSubscriptionNotification + Send + Sync + 'static { - self.create_subscription_inner(publishing_interval, lifetime_count, max_keep_alive_count, max_notifications_per_publish, priority, publishing_enabled, Arc::new(Mutex::new(callback))) + pub fn create_subscription( + &mut self, + publishing_interval: f64, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: u8, + publishing_enabled: bool, + callback: CB, + ) -> Result + where + CB: OnSubscriptionNotification + Send + Sync + 'static, + { + self.create_subscription_inner( + publishing_interval, + lifetime_count, + max_keep_alive_count, + max_notifications_per_publish, + priority, + publishing_enabled, + Arc::new(Mutex::new(callback)), + ) } /// This is the internal handler for create subscription that receives the callback wrapped up and reference counted. 
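Putting `create_subscription()` above together with `create_monitored_items()`: subscribe to data changes on a single node. The interval, counts and node id are illustrative, and the sketch assumes the `From<NodeId>` conversion for `MonitoredItemCreateRequest` provided by the types crate:

    // Inside a fn returning Result<(), StatusCode>, with a connected `session`.
    let subscription_id = session.create_subscription(
        500.0, // requested publishing interval (ms)
        10,    // requested lifetime count
        30,    // requested max keep alive count
        0,     // max notifications per publish (0 = no limit)
        0,     // priority
        true,  // publishing enabled
        DataChangeCallback::new(|changed_items| {
            for item in changed_items {
                println!("data change on client handle {}", item.client_handle());
            }
        }),
    )?;
    let item: MonitoredItemCreateRequest = NodeId::new(2, "Counter").into();
    let _results =
        session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &[item])?;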
- fn create_subscription_inner(&mut self, publishing_interval: f64, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, - priority: u8, publishing_enabled: bool, - callback: Arc>) - -> Result - { + fn create_subscription_inner( + &mut self, + publishing_interval: f64, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: u8, + publishing_enabled: bool, + callback: Arc>, + ) -> Result { let request = CreateSubscriptionRequest { request_header: self.make_request_header(), requested_publishing_interval: publishing_interval, @@ -2074,13 +2393,16 @@ impl Session { let response = self.send_request(request)?; if let SupportedMessage::CreateSubscriptionResponse(response) = response { crate::process_service_result(&response.response_header)?; - let subscription = Subscription::new(response.subscription_id, response.revised_publishing_interval, - response.revised_lifetime_count, - response.revised_max_keep_alive_count, - max_notifications_per_publish, - publishing_enabled, - priority, - callback); + let subscription = Subscription::new( + response.subscription_id, + response.revised_publishing_interval, + response.revised_lifetime_count, + response.revised_max_keep_alive_count, + max_notifications_per_publish, + publishing_enabled, + priority, + callback, + ); { let subscription_id = { @@ -2089,9 +2411,15 @@ impl Session { subscription_state.add_subscription(subscription); subscription_id }; - let _ = self.timer_command_queue.unbounded_send(SubscriptionTimerCommand::CreateTimer(subscription_id)); + let _ = self + .timer_command_queue + .unbounded_send(SubscriptionTimerCommand::CreateTimer(subscription_id)); } - session_debug!(self, "create_subscription, created a subscription with id {}", response.subscription_id); + session_debug!( + self, + "create_subscription, created a subscription with id {}", + response.subscription_id + ); Ok(response.subscription_id) } else { session_error!(self, "create_subscription failed {:?}", response); @@ -2116,7 +2444,15 @@ impl Session { /// /// [`ModifySubscriptionRequest`]: ./struct.ModifySubscriptionRequest.html /// - pub fn modify_subscription(&mut self, subscription_id: u32, publishing_interval: f64, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, priority: u8) -> Result<(), StatusCode> { + pub fn modify_subscription( + &mut self, + subscription_id: u32, + publishing_interval: f64, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: u8, + ) -> Result<(), StatusCode> { if subscription_id == 0 { session_error!(self, "modify_subscription, subscription id must be non-zero, or the subscription is considered invalid"); Err(StatusCode::BadInvalidArgument) @@ -2137,12 +2473,14 @@ impl Session { if let SupportedMessage::ModifySubscriptionResponse(response) = response { crate::process_service_result(&response.response_header)?; let mut subscription_state = trace_write_lock_unwrap!(self.subscription_state); - subscription_state.modify_subscription(subscription_id, - response.revised_publishing_interval, - response.revised_lifetime_count, - response.revised_max_keep_alive_count, - max_notifications_per_publish, - priority); + subscription_state.modify_subscription( + subscription_id, + response.revised_publishing_interval, + response.revised_lifetime_count, + response.revised_max_keep_alive_count, + max_notifications_per_publish, + priority, + ); session_debug!(self, "modify_subscription success for {}", 
subscription_id); Ok(()) } else { @@ -2169,11 +2507,23 @@ impl Session { /// /// [`SetPublishingModeRequest`]: ./struct.SetPublishingModeRequest.html /// - pub fn set_publishing_mode(&mut self, subscription_ids: &[u32], publishing_enabled: bool) -> Result, StatusCode> { - session_debug!(self, "set_publishing_mode, for subscriptions {:?}, publishing enabled {}", subscription_ids, publishing_enabled); + pub fn set_publishing_mode( + &mut self, + subscription_ids: &[u32], + publishing_enabled: bool, + ) -> Result, StatusCode> { + session_debug!( + self, + "set_publishing_mode, for subscriptions {:?}, publishing enabled {}", + subscription_ids, + publishing_enabled + ); if subscription_ids.is_empty() { // No subscriptions - session_error!(self, "set_publishing_mode, no subscription ids were provided"); + session_error!( + self, + "set_publishing_mode, no subscription ids were provided" + ); Err(StatusCode::BadNothingToDo) } else { let request = SetPublishingModeRequest { @@ -2219,10 +2569,17 @@ impl Session { /// [`TransferSubscriptionsRequest`]: ./struct.TransferSubscriptionsRequest.html /// [`TransferResult`]: ./struct.TransferResult.html /// - pub fn transfer_subscriptions(&mut self, subscription_ids: &[u32], send_initial_values: bool) -> Result, StatusCode> { + pub fn transfer_subscriptions( + &mut self, + subscription_ids: &[u32], + send_initial_values: bool, + ) -> Result, StatusCode> { if subscription_ids.is_empty() { // No subscriptions - session_error!(self, "set_publishing_mode, no subscription ids were provided"); + session_error!( + self, + "set_publishing_mode, no subscription ids were provided" + ); Err(StatusCode::BadNothingToDo) } else { let request = TransferSubscriptionsRequest { @@ -2262,7 +2619,11 @@ impl Session { session_error!(self, "delete_subscription, subscription id 0 is invalid"); Err(StatusCode::BadInvalidArgument) } else if !self.subscription_exists(subscription_id) { - session_error!(self, "delete_subscription, subscription id {} does not exist", subscription_id); + session_error!( + self, + "delete_subscription, subscription id {} does not exist", + subscription_id + ); Err(StatusCode::BadInvalidArgument) } else { let result = self.delete_subscriptions(&[subscription_id][..])?; @@ -2287,7 +2648,10 @@ impl Session { /// /// [`DeleteSubscriptionsRequest`]: ./struct.DeleteSubscriptionsRequest.html /// - pub fn delete_subscriptions(&mut self, subscription_ids: &[u32]) -> Result, StatusCode> { + pub fn delete_subscriptions( + &mut self, + subscription_ids: &[u32], + ) -> Result, StatusCode> { if subscription_ids.is_empty() { // No subscriptions session_trace!(self, "delete_subscriptions with no subscriptions"); @@ -2335,10 +2699,17 @@ impl Session { if let Some(ref subscription_ids) = subscription_ids { let status_codes = self.delete_subscriptions(subscription_ids.as_slice())?; // Return a list of (id, status_code) for each subscription - Ok(subscription_ids.iter().zip(status_codes).map(|(id, status_code)| (*id, status_code)).collect()) + Ok(subscription_ids + .iter() + .zip(status_codes) + .map(|(id, status_code)| (*id, status_code)) + .collect()) } else { // No subscriptions - session_trace!(self, "delete_all_subscriptions, called when there are no subscriptions"); + session_trace!( + self, + "delete_all_subscriptions, called when there are no subscriptions" + ); Err(StatusCode::BadNothingToDo) } } @@ -2374,20 +2745,30 @@ impl Session { } /// Synchronously sends a request. 
The return value is the response to the request - fn send_request(&mut self, request: T) -> Result where T: Into { + fn send_request(&mut self, request: T) -> Result + where + T: Into, + { let mut session_state = trace_write_lock_unwrap!(self.session_state); session_state.send_request(request) } /// Asynchronously sends a request. The return value is the request handle of the request - fn async_send_request(&mut self, request: T, is_async: bool) -> Result where T: Into { + fn async_send_request(&mut self, request: T, is_async: bool) -> Result + where + T: Into, + { let mut session_state = trace_write_lock_unwrap!(self.session_state); session_state.async_send_request(request, is_async) } // Creates a user identity token according to the endpoint, policy that the client is currently connected to the // server with. - fn user_identity_token(&self, server_cert: &Option, server_nonce: &[u8]) -> Result<(ExtensionObject, SignatureData), StatusCode> { + fn user_identity_token( + &self, + server_cert: &Option, + server_nonce: &[u8], + ) -> Result<(ExtensionObject, SignatureData), StatusCode> { let user_identity_token = &self.session_info.user_identity_token; let user_token_type = match user_identity_token { client::IdentityToken::Anonymous => UserTokenType::Anonymous, @@ -2402,7 +2783,11 @@ impl Session { // Return the result match policy { None => { - session_error!(self, "Cannot find user token type {:?} for this endpoint, cannot connect", user_token_type); + session_error!( + self, + "Cannot find user token type {:?} for this endpoint, cannot connect", + user_token_type + ); Err(StatusCode::BadSecurityPolicyRejected) } Some(policy) => { @@ -2413,7 +2798,11 @@ impl Session { SecurityPolicy::from_uri(policy.security_policy_uri.as_ref()) }; if security_policy == SecurityPolicy::Unknown { - session_error!(self, "Can't support the security policy {}", policy.security_policy_uri); + session_error!( + self, + "Can't support the security policy {}", + policy.security_policy_uri + ); Err(StatusCode::BadSecurityPolicyRejected) } else { match user_identity_token { @@ -2421,36 +2810,67 @@ impl Session { let identity_token = AnonymousIdentityToken { policy_id: policy.policy_id.clone(), }; - let identity_token = ExtensionObject::from_encodable(ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, &identity_token); + let identity_token = ExtensionObject::from_encodable( + ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, + &identity_token, + ); Ok((identity_token, SignatureData::null())) } client::IdentityToken::UserName(ref user, ref pass) => { let secure_channel = trace_read_lock_unwrap!(self.secure_channel); - let identity_token = self.make_user_name_identity_token(&secure_channel, policy, user, pass)?; - let identity_token = ExtensionObject::from_encodable(ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, &identity_token); + let identity_token = self.make_user_name_identity_token( + &secure_channel, + policy, + user, + pass, + )?; + let identity_token = ExtensionObject::from_encodable( + ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, + &identity_token, + ); Ok((identity_token, SignatureData::null())) } client::IdentityToken::X509(ref cert_path, ref private_key_path) => { if let Some(ref server_cert) = server_cert { // The cert will be supplied to the server along with a signature to prove we have the private key to go with the cert - let certificate_data = CertificateStore::read_cert(cert_path).map_err(|e| { - session_error!(self, "Certificate cannot be loaded from path {}, error = 
{}", cert_path.to_str().unwrap(), e); - StatusCode::BadSecurityPolicyRejected - })?; - let private_key = CertificateStore::read_pkey(private_key_path).map_err(|e| { - session_error!(self, "Private key cannot be loaded from path {}, error = {}", private_key_path.to_str().unwrap(), e); - StatusCode::BadSecurityPolicyRejected - })?; + let certificate_data = CertificateStore::read_cert(cert_path) + .map_err(|e| { + session_error!( + self, + "Certificate cannot be loaded from path {}, error = {}", + cert_path.to_str().unwrap(), + e + ); + StatusCode::BadSecurityPolicyRejected + })?; + let private_key = CertificateStore::read_pkey(private_key_path) + .map_err(|e| { + session_error!( + self, + "Private key cannot be loaded from path {}, error = {}", + private_key_path.to_str().unwrap(), + e + ); + StatusCode::BadSecurityPolicyRejected + })?; // Create a signature using the X509 private key to sign the server's cert and nonce - let user_token_signature = crypto::create_signature_data(&private_key, security_policy, &server_cert.as_byte_string(), &ByteString::from(server_nonce))?; + let user_token_signature = crypto::create_signature_data( + &private_key, + security_policy, + &server_cert.as_byte_string(), + &ByteString::from(server_nonce), + )?; // Create identity token let identity_token = X509IdentityToken { policy_id: policy.policy_id.clone(), certificate_data: certificate_data.as_byte_string(), }; - let identity_token = ExtensionObject::from_encodable(ObjectId::X509IdentityToken_Encoding_DefaultBinary, &identity_token); + let identity_token = ExtensionObject::from_encodable( + ObjectId::X509IdentityToken_Encoding_DefaultBinary, + &identity_token, + ); Ok((identity_token, user_token_signature)) } else { @@ -2466,11 +2886,24 @@ impl Session { /// Create a filled in UserNameIdentityToken by using the endpoint's token policy, the current /// secure channel information and the user name and password. - fn make_user_name_identity_token(&self, secure_channel: &SecureChannel, user_token_policy: &UserTokenPolicy, user: &str, pass: &str) -> Result { + fn make_user_name_identity_token( + &self, + secure_channel: &SecureChannel, + user_token_policy: &UserTokenPolicy, + user: &str, + pass: &str, + ) -> Result { let channel_security_policy = secure_channel.security_policy(); let nonce = secure_channel.remote_nonce(); let cert = secure_channel.remote_cert(); - make_user_name_identity_token(channel_security_policy, user_token_policy, nonce, &cert, user, pass) + make_user_name_identity_token( + channel_security_policy, + user_token_policy, + nonce, + &cert, + user, + pass, + ) } /// Construct a request header for the session. 
All requests after create session are expected @@ -2527,21 +2960,35 @@ impl Session { }; // Process data change notifications - if let Some((data_change_notifications, events)) = notification_message.notifications(&decoding_limits) { - session_debug!(self, "Received notifications, data changes = {}, events = {}", data_change_notifications.len(), events.len()); + if let Some((data_change_notifications, events)) = + notification_message.notifications(&decoding_limits) + { + session_debug!( + self, + "Received notifications, data changes = {}, events = {}", + data_change_notifications.len(), + events.len() + ); if !data_change_notifications.is_empty() { - let mut subscription_state = trace_write_lock_unwrap!(self.subscription_state); - subscription_state.on_data_change(subscription_id, &data_change_notifications); + let mut subscription_state = + trace_write_lock_unwrap!(self.subscription_state); + subscription_state + .on_data_change(subscription_id, &data_change_notifications); } if !events.is_empty() { - let mut subscription_state = trace_write_lock_unwrap!(self.subscription_state); + let mut subscription_state = + trace_write_lock_unwrap!(self.subscription_state); subscription_state.on_event(subscription_id, &events); } } } SupportedMessage::ServiceFault(response) => { let service_result = response.response_header.service_result; - session_debug!(self, "Service fault received with {} error code", service_result); + session_debug!( + self, + "Service fault received with {} error code", + service_result + ); session_trace!(self, "ServiceFault {:?}", response); match service_result { @@ -2550,8 +2997,7 @@ impl Session { wait_for_publish_response = true } StatusCode::BadSessionClosed | StatusCode::BadSessionIdInvalid => { - let mut session_state = - trace_write_lock_unwrap!(self.session_state); + let mut session_state = trace_write_lock_unwrap!(self.session_state); session_state.on_session_closed(service_result) } _ => (), @@ -2572,7 +3018,8 @@ impl Session { /// Test if the supplied node id matches one of the supplied object ids. i.e. it must be in namespace 0, /// and have a numeric value that matches the scalar value of the supplied enums. 
pub(crate) fn node_id_is_one_of(node_id: &NodeId, object_ids: &[ObjectId]) -> bool { - node_id.as_object_id() + node_id + .as_object_id() .map(|object_id| object_ids.iter().any(|v| object_id == *v)) .unwrap_or(false) } diff --git a/client/src/session_retry.rs b/client/src/session_retry.rs index 920bf7b23..6e5b6a33e 100644 --- a/client/src/session_retry.rs +++ b/client/src/session_retry.rs @@ -43,7 +43,11 @@ pub struct SessionRetryPolicy { impl Default for SessionRetryPolicy { fn default() -> Self { - Self::new(Self::DEFAULT_SESSION_TIMEOUT_MS, Self::DEFAULT_RETRY_LIMIT, Self::DEFAULT_RETRY_INTERVAL_MS) + Self::new( + Self::DEFAULT_SESSION_TIMEOUT_MS, + Self::DEFAULT_RETRY_LIMIT, + Self::DEFAULT_RETRY_INTERVAL_MS, + ) } } @@ -59,8 +63,16 @@ impl SessionRetryPolicy { /// Create a `SessionRetryPolicy` with a limit and interval pub fn new(session_timeout: f64, retry_limit: u32, retry_interval: u32) -> Self { - let session_timeout = if session_timeout == 0.0 { Self::DEFAULT_SESSION_TIMEOUT_MS } else { session_timeout }; - let retry_interval = if retry_interval < Self::MIN_RETRY_INTERVAL_MS { Self::MIN_RETRY_INTERVAL_MS } else { retry_interval }; + let session_timeout = if session_timeout == 0.0 { + Self::DEFAULT_SESSION_TIMEOUT_MS + } else { + session_timeout + }; + let retry_interval = if retry_interval < Self::MIN_RETRY_INTERVAL_MS { + Self::MIN_RETRY_INTERVAL_MS + } else { + retry_interval + }; SessionRetryPolicy { session_timeout, retry_count: 0, @@ -72,8 +84,16 @@ impl SessionRetryPolicy { /// Create a `SessionRetryPolicy` that tries forever at the specified interval pub fn infinity(session_timeout: f64, retry_interval: u32) -> Self { - let session_timeout = if session_timeout == 0.0 { Self::DEFAULT_SESSION_TIMEOUT_MS } else { session_timeout }; - let retry_interval = if retry_interval < Self::MIN_RETRY_INTERVAL_MS { Self::MIN_RETRY_INTERVAL_MS } else { retry_interval }; + let session_timeout = if session_timeout == 0.0 { + Self::DEFAULT_SESSION_TIMEOUT_MS + } else { + session_timeout + }; + let retry_interval = if retry_interval < Self::MIN_RETRY_INTERVAL_MS { + Self::MIN_RETRY_INTERVAL_MS + } else { + retry_interval + }; SessionRetryPolicy { session_timeout, retry_count: 0, @@ -146,11 +166,15 @@ fn session_retry() { let now = Utc::now(); - let retry_interval = Duration::milliseconds(SessionRetryPolicy::DEFAULT_RETRY_INTERVAL_MS as i64); + let retry_interval = + Duration::milliseconds(SessionRetryPolicy::DEFAULT_RETRY_INTERVAL_MS as i64); let last_attempt_expired = now - retry_interval - Duration::nanoseconds(1); let last_attempt_wait = now - retry_interval + Duration::seconds(1); - assert_eq!(session_retry.session_timeout(), SessionRetryPolicy::DEFAULT_SESSION_TIMEOUT_MS); + assert_eq!( + session_retry.session_timeout(), + SessionRetryPolicy::DEFAULT_SESSION_TIMEOUT_MS + ); session_retry.set_last_attempt(last_attempt_expired); assert_eq!(session_retry.should_retry_connect(now), Answer::Retry); @@ -161,7 +185,10 @@ fn session_retry() { session_retry.set_last_attempt(last_attempt_wait); session_retry.retry_count = 0; - assert_eq!(session_retry.should_retry_connect(now), Answer::WaitFor(1000)); + assert_eq!( + session_retry.should_retry_connect(now), + Answer::WaitFor(1000) + ); } #[test] diff --git a/client/src/session_state.rs b/client/src/session_state.rs index 08749fc65..f099886a0 100644 --- a/client/src/session_state.rs +++ b/client/src/session_state.rs @@ -5,8 +5,8 @@ use std::{ self, sync::{ - Arc, atomic::{AtomicU32, Ordering}, - RwLock, + atomic::{AtomicU32, Ordering}, + Arc, 
RwLock, }, u32, }; @@ -14,15 +14,10 @@ use std::{ use chrono; use opcua_core::{ - comms::secure_channel::SecureChannel, - handle::Handle, - supported_message::SupportedMessage, + comms::secure_channel::SecureChannel, handle::Handle, supported_message::SupportedMessage, }; use opcua_crypto::SecurityPolicy; -use opcua_types::{ - *, - status_code::StatusCode, -}; +use opcua_types::{status_code::StatusCode, *}; use crate::{ callbacks::{OnConnectionStatusChange, OnSessionClosed}, @@ -115,7 +110,10 @@ impl SessionState { /// Used for synchronous polling const SYNC_POLLING_PERIOD: u64 = 50; - pub fn new(secure_channel: Arc>, message_queue: Arc>) -> SessionState { + pub fn new( + secure_channel: Arc>, + message_queue: Arc>, + ) -> SessionState { let id = NEXT_SESSION_ID.fetch_add(1, Ordering::Relaxed); SessionState { id, @@ -169,8 +167,12 @@ impl SessionState { self.subscription_acknowledgements.drain(..).collect() } - pub fn add_subscription_acknowledgement(&mut self, subscription_acknowledgement: SubscriptionAcknowledgement) { - self.subscription_acknowledgements.push(subscription_acknowledgement); + pub fn add_subscription_acknowledgement( + &mut self, + subscription_acknowledgement: SubscriptionAcknowledgement, + ) { + self.subscription_acknowledgements + .push(subscription_acknowledgement); } //pub fn authentication_token(&self) -> &NodeId { @@ -181,11 +183,17 @@ impl SessionState { self.authentication_token = authentication_token; } - pub fn set_session_closed_callback(&mut self, session_closed_callback: CB) where CB: OnSessionClosed + Send + Sync + 'static { + pub fn set_session_closed_callback(&mut self, session_closed_callback: CB) + where + CB: OnSessionClosed + Send + Sync + 'static, + { self.session_closed_callback = Some(Box::new(session_closed_callback)); } - pub fn set_connection_status_callback(&mut self, connection_status_callback: CB) where CB: OnConnectionStatusChange + Send + Sync + 'static { + pub fn set_connection_status_callback(&mut self, connection_status_callback: CB) + where + CB: OnConnectionStatusChange + Send + Sync + 'static, + { self.connection_status_callback = Some(Box::new(connection_status_callback)); } @@ -207,7 +215,9 @@ impl SessionState { if self.wait_for_publish_response && !wait_for_publish_response { debug!("Publish requests are enabled again"); } else if !self.wait_for_publish_response && wait_for_publish_response { - debug!("Publish requests will be disabled until some publish responses start to arrive"); + debug!( + "Publish requests will be disabled until some publish responses start to arrive" + ); } self.wait_for_publish_response = wait_for_publish_response; } @@ -227,11 +237,21 @@ impl SessionState { } /// Sends a publish request containing acknowledgements for previous notifications. 
- pub fn async_publish(&mut self, subscription_acknowledgements: &[SubscriptionAcknowledgement]) -> Result { - debug!("async_publish with {} subscription acknowledgements", subscription_acknowledgements.len()); + pub fn async_publish( + &mut self, + subscription_acknowledgements: &[SubscriptionAcknowledgement], + ) -> Result { + debug!( + "async_publish with {} subscription acknowledgements", + subscription_acknowledgements.len() + ); let request = PublishRequest { request_header: self.make_request_header(), - subscription_acknowledgements: if subscription_acknowledgements.is_empty() { None } else { Some(subscription_acknowledgements.to_vec()) }, + subscription_acknowledgements: if subscription_acknowledgements.is_empty() { + None + } else { + Some(subscription_acknowledgements.to_vec()) + }, }; let request_handle = self.async_send_request(request, true)?; debug!("async_publish, request sent with handle {}", request_handle); @@ -239,7 +259,10 @@ impl SessionState { } /// Synchronously sends a request. The return value is the response to the request - pub(crate) fn send_request(&mut self, request: T) -> Result where T: Into { + pub(crate) fn send_request(&mut self, request: T) -> Result + where + T: Into, + { // Send the request let request_handle = self.async_send_request(request, false)?; // Wait for the response @@ -262,10 +285,18 @@ impl SessionState { } /// Asynchronously sends a request. The return value is the request handle of the request - pub(crate) fn async_send_request(&mut self, request: T, is_async: bool) -> Result where T: Into { + pub(crate) fn async_send_request( + &mut self, + request: T, + is_async: bool, + ) -> Result + where + T: Into, + { let request = request.into(); match request { - SupportedMessage::OpenSecureChannelRequest(_) | SupportedMessage::CloseSecureChannelRequest(_) => {} + SupportedMessage::OpenSecureChannelRequest(_) + | SupportedMessage::CloseSecureChannelRequest(_) => {} _ => { // Make sure secure channel token hasn't expired let _ = self.ensure_secure_channel_token(); @@ -290,7 +321,11 @@ impl SessionState { /// is performed and in fact the function is expected to receive no messages except asynchronous /// and housekeeping events from the server. A 0 handle will cause the wait to process at most /// one async message before returning. 
- fn wait_for_sync_response(&mut self, request_handle: u32, request_timeout: u32) -> Result { + fn wait_for_sync_response( + &mut self, + request_handle: u32, + request_timeout: u32, + ) -> Result { if request_handle == 0 { panic!("Request handle must be non zero"); } @@ -344,7 +379,10 @@ impl SessionState { } } - pub(crate) fn issue_or_renew_secure_channel(&mut self, request_type: SecurityTokenRequestType) -> Result<(), StatusCode> { + pub(crate) fn issue_or_renew_secure_channel( + &mut self, + request_type: SecurityTokenRequestType, + ) -> Result<(), StatusCode> { trace!("issue_or_renew_secure_channel({:?})", request_type); const REQUESTED_LIFETIME: u32 = 60000; // TODO @@ -353,7 +391,11 @@ impl SessionState { let mut secure_channel = trace_write_lock_unwrap!(self.secure_channel); let client_nonce = secure_channel.security_policy().random_nonce(); secure_channel.set_local_nonce(client_nonce.as_ref()); - (secure_channel.security_mode(), secure_channel.security_policy(), client_nonce) + ( + secure_channel.security_mode(), + secure_channel.security_policy(), + client_nonce, + ) }; info!("Making secure channel request"); @@ -376,7 +418,10 @@ impl SessionState { let mut secure_channel = trace_write_lock_unwrap!(self.secure_channel); secure_channel.set_security_token(response.security_token.clone()); - if security_policy != SecurityPolicy::None && (security_mode == MessageSecurityMode::Sign || security_mode == MessageSecurityMode::SignAndEncrypt) { + if security_policy != SecurityPolicy::None + && (security_mode == MessageSecurityMode::Sign + || security_mode == MessageSecurityMode::SignAndEncrypt) + { secure_channel.set_remote_nonce_from_byte_string(&response.server_nonce)?; secure_channel.derive_keys(); } diff --git a/client/src/subscription.rs b/client/src/subscription.rs index 34e5da552..a9fcd0fce 100644 --- a/client/src/subscription.rs +++ b/client/src/subscription.rs @@ -18,7 +18,10 @@ use std::{ sync::{Arc, Mutex}, }; -use opcua_types::{*, service_types::{DataChangeNotification, ReadValueId}}; +use opcua_types::{ + service_types::{DataChangeNotification, ReadValueId}, + *, +}; use crate::callbacks::OnSubscriptionNotification; @@ -80,25 +83,37 @@ impl MonitoredItem { } } - pub fn id(&self) -> u32 { self.id } + pub fn id(&self) -> u32 { + self.id + } pub fn client_handle(&self) -> u32 { self.client_handle } - pub fn item_to_monitor(&self) -> &ReadValueId { &self.item_to_monitor } + pub fn item_to_monitor(&self) -> &ReadValueId { + &self.item_to_monitor + } - pub fn sampling_interval(&self) -> f64 { self.sampling_interval } + pub fn sampling_interval(&self) -> f64 { + self.sampling_interval + } - pub fn queue_size(&self) -> u32 { self.queue_size } + pub fn queue_size(&self) -> u32 { + self.queue_size + } pub fn value(&self) -> &DataValue { &self.value } - pub fn monitoring_mode(&self) -> MonitoringMode { self.monitoring_mode } + pub fn monitoring_mode(&self) -> MonitoringMode { + self.monitoring_mode + } - pub fn discard_oldest(&self) -> bool { self.discard_oldest } + pub fn discard_oldest(&self) -> bool { + self.discard_oldest + } pub(crate) fn set_id(&mut self, value: u32) { self.id = value; @@ -125,8 +140,12 @@ impl MonitoredItem { } pub(crate) fn set_triggering(&mut self, links_to_add: &[u32], links_to_remove: &[u32]) { - links_to_remove.iter().for_each(|i| { self.triggered_items.remove(i); }); - links_to_add.iter().for_each(|i| { self.triggered_items.insert(*i); }); + links_to_remove.iter().for_each(|i| { + self.triggered_items.remove(i); + }); + links_to_add.iter().for_each(|i| { + 
self.triggered_items.insert(*i); + }); } pub(crate) fn triggered_items(&self) -> &BTreeSet { @@ -160,10 +179,16 @@ pub struct Subscription { impl Subscription { /// Creates a new subscription using the supplied parameters and the supplied data change callback. - pub fn new(subscription_id: u32, publishing_interval: f64, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, - publishing_enabled: bool, priority: u8, notification_callback: Arc>) - -> Subscription - { + pub fn new( + subscription_id: u32, + publishing_interval: f64, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + publishing_enabled: bool, + priority: u8, + notification_callback: Arc>, + ) -> Subscription { Subscription { subscription_id, publishing_interval, @@ -178,35 +203,67 @@ impl Subscription { } } - pub fn monitored_items(&self) -> &HashMap { &self.monitored_items } + pub fn monitored_items(&self) -> &HashMap { + &self.monitored_items + } - pub fn subscription_id(&self) -> u32 { self.subscription_id } + pub fn subscription_id(&self) -> u32 { + self.subscription_id + } - pub fn publishing_interval(&self) -> f64 { self.publishing_interval } + pub fn publishing_interval(&self) -> f64 { + self.publishing_interval + } - pub fn lifetime_count(&self) -> u32 { self.lifetime_count } + pub fn lifetime_count(&self) -> u32 { + self.lifetime_count + } - pub fn max_keep_alive_count(&self) -> u32 { self.max_keep_alive_count } + pub fn max_keep_alive_count(&self) -> u32 { + self.max_keep_alive_count + } - pub fn max_notifications_per_publish(&self) -> u32 { self.max_notifications_per_publish } + pub fn max_notifications_per_publish(&self) -> u32 { + self.max_notifications_per_publish + } - pub fn publishing_enabled(&self) -> bool { self.publishing_enabled } + pub fn publishing_enabled(&self) -> bool { + self.publishing_enabled + } - pub fn priority(&self) -> u8 { self.priority } + pub fn priority(&self) -> u8 { + self.priority + } - pub fn notification_callback(&self) -> Arc> { self.notification_callback.clone() } + pub fn notification_callback( + &self, + ) -> Arc> { + self.notification_callback.clone() + } - pub(crate) fn set_publishing_interval(&mut self, publishing_interval: f64) { self.publishing_interval = publishing_interval; } + pub(crate) fn set_publishing_interval(&mut self, publishing_interval: f64) { + self.publishing_interval = publishing_interval; + } - pub(crate) fn set_lifetime_count(&mut self, lifetime_count: u32) { self.lifetime_count = lifetime_count; } + pub(crate) fn set_lifetime_count(&mut self, lifetime_count: u32) { + self.lifetime_count = lifetime_count; + } - pub(crate) fn set_max_keep_alive_count(&mut self, max_keep_alive_count: u32) { self.max_keep_alive_count = max_keep_alive_count; } + pub(crate) fn set_max_keep_alive_count(&mut self, max_keep_alive_count: u32) { + self.max_keep_alive_count = max_keep_alive_count; + } - pub(crate) fn set_max_notifications_per_publish(&mut self, max_notifications_per_publish: u32) { self.max_notifications_per_publish = max_notifications_per_publish; } + pub(crate) fn set_max_notifications_per_publish(&mut self, max_notifications_per_publish: u32) { + self.max_notifications_per_publish = max_notifications_per_publish; + } - pub(crate) fn set_priority(&mut self, priority: u8) { self.priority = priority; } + pub(crate) fn set_priority(&mut self, priority: u8) { + self.priority = priority; + } - pub(crate) fn set_publishing_enabled(&mut self, publishing_enabled: bool) { self.publishing_enabled = 
publishing_enabled; } + pub(crate) fn set_publishing_enabled(&mut self, publishing_enabled: bool) { + self.publishing_enabled = publishing_enabled; + } pub(crate) fn insert_monitored_items(&mut self, items_to_create: &[CreateMonitoredItem]) { items_to_create.iter().for_each(|i| { @@ -220,7 +277,8 @@ impl Subscription { let client_handle = monitored_item.client_handle(); let monitored_item_id = monitored_item.id(); - self.monitored_items.insert(monitored_item_id, monitored_item); + self.monitored_items + .insert(monitored_item_id, monitored_item); self.client_handles.insert(client_handle, monitored_item_id); }); } @@ -243,14 +301,21 @@ impl Subscription { }) } - pub(crate) fn set_triggering(&mut self, triggering_item_id: u32, links_to_add: &[u32], links_to_remove: &[u32]) { + pub(crate) fn set_triggering( + &mut self, + triggering_item_id: u32, + links_to_add: &[u32], + links_to_remove: &[u32], + ) { if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&triggering_item_id) { monitored_item.set_triggering(links_to_add, links_to_remove); } } fn monitored_item_id_from_handle(&self, client_handle: u32) -> Option { - self.client_handles.get(&client_handle).map(|monitored_item_id| *monitored_item_id) + self.client_handles + .get(&client_handle) + .map(|monitored_item_id| *monitored_item_id) } pub(crate) fn on_event(&mut self, events: &[EventNotificationList]) { @@ -280,12 +345,14 @@ impl Subscription { } if !monitored_item_ids.is_empty() { - let data_change_items: Vec<&MonitoredItem> = monitored_item_ids.iter() - .map(|id| self.monitored_items.get(&id).unwrap()).collect(); + let data_change_items: Vec<&MonitoredItem> = monitored_item_ids + .iter() + .map(|id| self.monitored_items.get(&id).unwrap()) + .collect(); // Call the call back with the changes we collected let mut cb = trace_lock_unwrap!(self.notification_callback); cb.on_data_change(data_change_items); } } -} \ No newline at end of file +} diff --git a/client/src/subscription_state.rs b/client/src/subscription_state.rs index 7c5d77c5d..30a8af153 100644 --- a/client/src/subscription_state.rs +++ b/client/src/subscription_state.rs @@ -7,10 +7,7 @@ use std::sync::{Arc, RwLock}; use opcua_types::service_types::{DataChangeNotification, EventNotificationList}; -use crate::{ - subscription::*, - subscription_timer::SubscriptionTimer, -}; +use crate::{subscription::*, subscription_timer::SubscriptionTimer}; /// Holds the live subscription state pub struct SubscriptionState { @@ -37,7 +34,10 @@ impl SubscriptionState { debug!("Cancelling subscription timers"); self.subscription_timers.drain(..).for_each(|timer| { let mut timer = trace_write_lock_unwrap!(timer); - debug!("Cancelling subscription timer for subscription {}", timer.subscription_id()); + debug!( + "Cancelling subscription timer for subscription {}", + timer.subscription_id() + ); timer.cancel(); }) } @@ -59,10 +59,19 @@ impl SubscriptionState { } pub(crate) fn add_subscription(&mut self, subscription: Subscription) { - self.subscriptions.insert(subscription.subscription_id(), subscription); - } - - pub(crate) fn modify_subscription(&mut self, subscription_id: u32, publishing_interval: f64, lifetime_count: u32, max_keep_alive_count: u32, max_notifications_per_publish: u32, priority: u8) { + self.subscriptions + .insert(subscription.subscription_id(), subscription); + } + + pub(crate) fn modify_subscription( + &mut self, + subscription_id: u32, + publishing_interval: f64, + lifetime_count: u32, + max_keep_alive_count: u32, + max_notifications_per_publish: u32, + priority: 
u8, + ) { if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { subscription.set_publishing_interval(publishing_interval); subscription.set_lifetime_count(lifetime_count); @@ -76,7 +85,11 @@ impl SubscriptionState { self.subscriptions.remove(&subscription_id) } - pub(crate) fn set_publishing_mode(&mut self, subscription_ids: &[u32], publishing_enabled: bool) { + pub(crate) fn set_publishing_mode( + &mut self, + subscription_ids: &[u32], + publishing_enabled: bool, + ) { subscription_ids.iter().for_each(|subscription_id| { if let Some(ref mut subscription) = self.subscriptions.get_mut(subscription_id) { subscription.set_publishing_enabled(publishing_enabled); @@ -84,7 +97,11 @@ impl SubscriptionState { }); } - pub(crate) fn on_data_change(&mut self, subscription_id: u32, data_change_notifications: &[DataChangeNotification]) { + pub(crate) fn on_data_change( + &mut self, + subscription_id: u32, + data_change_notifications: &[DataChangeNotification], + ) { if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { subscription.on_data_change(data_change_notifications); } @@ -96,13 +113,21 @@ impl SubscriptionState { } } - pub(crate) fn insert_monitored_items(&mut self, subscription_id: u32, items_to_create: &[CreateMonitoredItem]) { + pub(crate) fn insert_monitored_items( + &mut self, + subscription_id: u32, + items_to_create: &[CreateMonitoredItem], + ) { if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { subscription.insert_monitored_items(items_to_create); } } - pub(crate) fn modify_monitored_items(&mut self, subscription_id: u32, items_to_modify: &[ModifyMonitoredItem]) { + pub(crate) fn modify_monitored_items( + &mut self, + subscription_id: u32, + items_to_modify: &[ModifyMonitoredItem], + ) { if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { subscription.modify_monitored_items(items_to_modify); } @@ -114,7 +139,13 @@ impl SubscriptionState { } } - pub(crate) fn set_triggering(&mut self, subscription_id: u32, triggering_item_id: u32, links_to_add: &[u32], links_to_remove: &[u32]) { + pub(crate) fn set_triggering( + &mut self, + subscription_id: u32, + triggering_item_id: u32, + links_to_add: &[u32], + links_to_remove: &[u32], + ) { if let Some(ref mut subscription) = self.subscriptions.get_mut(&subscription_id) { subscription.set_triggering(triggering_item_id, links_to_add, links_to_remove); } diff --git a/client/src/subscription_timer.rs b/client/src/subscription_timer.rs index f690f5465..74119fc7b 100644 --- a/client/src/subscription_timer.rs +++ b/client/src/subscription_timer.rs @@ -9,16 +9,14 @@ use std::{ }; use futures::{ - {future, Future}, stream::Stream, sync::mpsc::{unbounded, UnboundedSender}, + {future, Future}, }; use tokio; use tokio_timer::Interval; -use crate::{ - session_state::SessionState, subscription_state::SubscriptionState, -}; +use crate::{session_state::SessionState, subscription_state::SubscriptionState}; #[derive(Clone, Copy, PartialEq)] pub(crate) enum SubscriptionTimerCommand { @@ -39,36 +37,43 @@ impl SubscriptionTimer { /// /// Each subscription timer spawned by the thread runs as a timer task associated with a /// subscription. The subscription timer is responsible for publish requests to the server. 
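// Illustrative sketch of driving the command queue built below, assuming
// `session_state`, `subscription_state` and a `subscription_id` are in scope and
// that the futures 0.1 `unbounded_send` method is used to post commands:
//
let commands = SubscriptionTimer::make_timer_command_queue(session_state, subscription_state);
// Ask the background thread to spawn a publish timer for the subscription.
let _ = commands.unbounded_send(SubscriptionTimerCommand::CreateTimer(subscription_id));
// Later, stop the background thread altogether.
let _ = commands.unbounded_send(SubscriptionTimerCommand::Quit);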
- pub(crate) fn make_timer_command_queue(session_state: Arc>, subscription_state: Arc>) -> UnboundedSender { + pub(crate) fn make_timer_command_queue( + session_state: Arc>, + subscription_state: Arc>, + ) -> UnboundedSender { let (timer_command_queue, timer_receiver) = unbounded::(); let _ = thread::spawn(move || { // This listens for timer actions to spawn - let timer_task = timer_receiver.take_while(|cmd| { - let take = *cmd != SubscriptionTimerCommand::Quit; - future::ok(take) - }).map(move |cmd| { - (cmd, session_state.clone(), subscription_state.clone()) - }).for_each(|(cmd, session_state, subscription_state)| { - if let SubscriptionTimerCommand::CreateTimer(subscription_id) = cmd { - let timer = Arc::new(RwLock::new(SubscriptionTimer { - subscription_id, - session_state, - subscription_state: subscription_state.clone(), - cancel: false, - })); - { - let mut subscription_state = trace_write_lock_unwrap!(subscription_state); - subscription_state.add_subscription_timer(timer.clone()); + let timer_task = timer_receiver + .take_while(|cmd| { + let take = *cmd != SubscriptionTimerCommand::Quit; + future::ok(take) + }) + .map(move |cmd| (cmd, session_state.clone(), subscription_state.clone())) + .for_each(|(cmd, session_state, subscription_state)| { + if let SubscriptionTimerCommand::CreateTimer(subscription_id) = cmd { + let timer = Arc::new(RwLock::new(SubscriptionTimer { + subscription_id, + session_state, + subscription_state: subscription_state.clone(), + cancel: false, + })); + { + let mut subscription_state = + trace_write_lock_unwrap!(subscription_state); + subscription_state.add_subscription_timer(timer.clone()); + } + let timer_task = Self::make_subscription_timer(timer); + tokio::spawn(timer_task); } - let timer_task = Self::make_subscription_timer(timer); - tokio::spawn(timer_task); - } - future::ok(()) - }).map(|_| { - info!("Timer receiver has terminated"); - }).map_err(|_| { - error!("Timer receiver has terminated with an error"); - }); + future::ok(()) + }) + .map(|_| { + info!("Timer receiver has terminated"); + }) + .map_err(|_| { + error!("Timer receiver has terminated with an error"); + }); tokio::run(timer_task); }); timer_command_queue @@ -76,7 +81,9 @@ impl SubscriptionTimer { /// Makes a future that publishes requests for the subscription. This code doesn't return "impl Future" /// due to recursive behaviour in the take_while, so instead it returns a boxed future. 
- fn make_subscription_timer(timer: Arc>) -> Box + Send> { + fn make_subscription_timer( + timer: Arc>, + ) -> Box + Send> { let publishing_interval = { let (subscription_id, subscription_state) = { let timer = trace_read_lock_unwrap!(timer); @@ -87,7 +94,10 @@ impl SubscriptionTimer { if let Some(subscription) = ss.get(subscription_id) { subscription.publishing_interval() } else { - error!("Cannot start timer for subscription id {}, doesn't exist", subscription_id); + error!( + "Cannot start timer for subscription id {}, doesn't exist", + subscription_id + ); 100.0 } }; @@ -167,4 +177,4 @@ impl SubscriptionTimer { pub fn cancel(&mut self) { self.cancel = true; } -} \ No newline at end of file +} diff --git a/client/src/tests/mod.rs b/client/src/tests/mod.rs index f3e0105c5..91eab4be5 100644 --- a/client/src/tests/mod.rs +++ b/client/src/tests/mod.rs @@ -1,7 +1,4 @@ -use std::{ - self, - collections::BTreeMap, path::PathBuf, -}; +use std::{self, collections::BTreeMap, path::PathBuf}; use opcua_core::config::Config; use opcua_crypto::SecurityPolicy; @@ -9,7 +6,7 @@ use opcua_types::*; use crate::{ builder::ClientBuilder, - config::{ANONYMOUS_USER_TOKEN_ID, ClientConfig, ClientEndpoint, ClientUserToken}, + config::{ClientConfig, ClientEndpoint, ClientUserToken, ANONYMOUS_USER_TOKEN_ID}, session::Session, }; @@ -29,34 +26,52 @@ pub fn sample_builder() -> ClientBuilder { .trust_server_certs(true) .pki_dir("./pki") .endpoints(vec![ - ("sample_none", ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::None.to_str()), - security_mode: String::from(MessageSecurityMode::None), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }), - ("sample_basic128rsa15", ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_str()), - security_mode: String::from(MessageSecurityMode::SignAndEncrypt), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }), - ("sample_basic256", ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::Basic256.to_str()), - security_mode: String::from(MessageSecurityMode::SignAndEncrypt), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }), - ("sample_basic256sha256", ClientEndpoint { - url: String::from("opc.tcp://127.0.0.1:4855/"), - security_policy: String::from(SecurityPolicy::Basic256Sha256.to_str()), - security_mode: String::from(MessageSecurityMode::SignAndEncrypt), - user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }) + ( + "sample_none", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::None.to_str()), + security_mode: String::from(MessageSecurityMode::None), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ( + "sample_basic128rsa15", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_str()), + security_mode: String::from(MessageSecurityMode::SignAndEncrypt), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ( + "sample_basic256", + ClientEndpoint { + url: String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::Basic256.to_str()), + security_mode: String::from(MessageSecurityMode::SignAndEncrypt), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), + ( + "sample_basic256sha256", + ClientEndpoint { + url: 
String::from("opc.tcp://127.0.0.1:4855/"), + security_policy: String::from(SecurityPolicy::Basic256Sha256.to_str()), + security_mode: String::from(MessageSecurityMode::SignAndEncrypt), + user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), + }, + ), ]) .default_endpoint("sample_none") - .user_token("sample_user", ClientUserToken::user_pass("sample1", "sample1pwd")) - .user_token("sample_user2", ClientUserToken::user_pass("sample2", "sample2pwd")) + .user_token( + "sample_user", + ClientUserToken::user_pass("sample1", "sample1pwd"), + ) + .user_token( + "sample_user2", + ClientUserToken::user_pass("sample2", "sample2pwd"), + ) } pub fn default_sample_config() -> ClientConfig { @@ -107,11 +122,11 @@ fn client_invalid_security_policy_config() { security_policy: String::from("http://blah"), security_mode: String::from(MessageSecurityMode::None), user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }); + }, + ); assert!(!config.is_valid()); } - #[test] fn client_invalid_security_mode_config() { let mut config = default_sample_config(); @@ -124,7 +139,8 @@ fn client_invalid_security_mode_config() { security_policy: String::from(SecurityPolicy::Basic128Rsa15.to_uri()), security_mode: String::from("SingAndEncrypt"), user_token_id: ANONYMOUS_USER_TOKEN_ID.to_string(), - }); + }, + ); assert!(!config.is_valid()); } @@ -140,7 +156,8 @@ fn client_anonymous_user_tokens_id() { password: Some(String::new()), cert_path: None, private_key_path: None, - }); + }, + ); assert!(!config.is_valid()); } @@ -152,16 +169,37 @@ fn node_id_is_one_of() { ObjectId::UpdateEventDetails_Encoding_DefaultBinary, ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, - ObjectId::DeleteEventDetails_Encoding_DefaultBinary + ObjectId::DeleteEventDetails_Encoding_DefaultBinary, ]; // Node ids that should not match - assert!(!Session::node_id_is_one_of(&NodeId::new(2, "hello"), &object_ids)); - assert!(!Session::node_id_is_one_of(&NodeId::new(2, ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary as u32), &object_ids)); - assert!(!Session::node_id_is_one_of(&NodeId::from(&VariableTypeId::DiscreteItemType), &object_ids)); - assert!(!Session::node_id_is_one_of(&NodeId::from(&ObjectId::AggregateFunction_Start), &object_ids)); + assert!(!Session::node_id_is_one_of( + &NodeId::new(2, "hello"), + &object_ids + )); + assert!(!Session::node_id_is_one_of( + &NodeId::new( + 2, + ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary as u32 + ), + &object_ids + )); + assert!(!Session::node_id_is_one_of( + &NodeId::from(&VariableTypeId::DiscreteItemType), + &object_ids + )); + assert!(!Session::node_id_is_one_of( + &NodeId::from(&ObjectId::AggregateFunction_Start), + &object_ids + )); // Node ids that should match - assert!(Session::node_id_is_one_of(&NodeId::from(&ObjectId::UpdateDataDetails_Encoding_DefaultBinary), &object_ids)); - assert!(Session::node_id_is_one_of(&NodeId::from(&ObjectId::DeleteEventDetails_Encoding_DefaultBinary), &object_ids)); + assert!(Session::node_id_is_one_of( + &NodeId::from(&ObjectId::UpdateDataDetails_Encoding_DefaultBinary), + &object_ids + )); + assert!(Session::node_id_is_one_of( + &NodeId::from(&ObjectId::DeleteEventDetails_Encoding_DefaultBinary), + &object_ids + )); } diff --git a/console-logging/src/lib.rs b/console-logging/src/lib.rs index 90e9aa64c..8df88393e 100644 --- a/console-logging/src/lib.rs +++ b/console-logging/src/lib.rs @@ -46,9 +46,16 @@ pub fn init() { _ => {} } - writeln!(buf, "{} - {} - {} - {}", time_fmt, 
style.value(record.level()), record.target(), record.args()) + writeln!( + buf, + "{} - {} - {} - {}", + time_fmt, + style.value(record.level()), + record.target(), + record.args() + ) }); builder.init(); info!("Logging is enabled, use RUST_OPCUA_LOG environment variable to control filtering, logging level"); } -} \ No newline at end of file +} diff --git a/core/src/comms/chunker.rs b/core/src/comms/chunker.rs index 989ffbd74..f49ca8613 100644 --- a/core/src/comms/chunker.rs +++ b/core/src/comms/chunker.rs @@ -9,10 +9,7 @@ use std::io::Cursor; use opcua_crypto::SecurityPolicy; use opcua_types::{ - encoding::BinaryEncoder, - node_id::NodeId, - node_ids::ObjectId, - status_code::StatusCode, + encoding::BinaryEncoder, node_id::NodeId, node_ids::ObjectId, status_code::StatusCode, }; use crate::{ @@ -30,9 +27,13 @@ impl Chunker { /// Tests what kind of chunk type is used for the supported message. fn message_type(message: &SupportedMessage) -> MessageChunkType { match message { - SupportedMessage::OpenSecureChannelRequest(_) | SupportedMessage::OpenSecureChannelResponse(_) => MessageChunkType::OpenSecureChannel, - SupportedMessage::CloseSecureChannelRequest(_) | SupportedMessage::CloseSecureChannelResponse(_) => MessageChunkType::CloseSecureChannel, - _ => MessageChunkType::Message + SupportedMessage::OpenSecureChannelRequest(_) + | SupportedMessage::OpenSecureChannelResponse(_) => MessageChunkType::OpenSecureChannel, + SupportedMessage::CloseSecureChannelRequest(_) + | SupportedMessage::CloseSecureChannelResponse(_) => { + MessageChunkType::CloseSecureChannel + } + _ => MessageChunkType::Message, } } @@ -41,13 +42,20 @@ impl Chunker { /// /// The function returns the last sequence number in the series for success, or /// `BadSequenceNumberInvalid` or `BadSecureChannelIdInvalid` for failure. 
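// Hedged usage sketch of the call defined below, assuming `secure_channel`, `chunks`
// and a running `last_sequence_number` are in scope; on success the returned value is
// the sequence number of the final chunk in the series:
//
let last = Chunker::validate_chunks(last_sequence_number + 1, &secure_channel, &chunks)?;
last_sequence_number = last;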
- pub fn validate_chunks(starting_sequence_number: u32, secure_channel: &SecureChannel, chunks: &[MessageChunk]) -> Result { + pub fn validate_chunks( + starting_sequence_number: u32, + secure_channel: &SecureChannel, + chunks: &[MessageChunk], + ) -> Result { let first_sequence_number = { let chunk_info = chunks[0].chunk_info(secure_channel)?; chunk_info.sequence_header.sequence_number }; if first_sequence_number < starting_sequence_number { - error!("First sequence number of {} is less than last value {}", first_sequence_number, starting_sequence_number); + error!( + "First sequence number of {} is less than last value {}", + first_sequence_number, starting_sequence_number + ); Err(StatusCode::BadSequenceNumberInvalid) } else { let secure_channel_id = secure_channel.secure_channel_id(); @@ -58,8 +66,13 @@ impl Chunker { let chunk_info = chunk.chunk_info(secure_channel)?; // Check the channel id of each chunk - if secure_channel_id != 0 && chunk_info.message_header.secure_channel_id != secure_channel_id { - error!("Secure channel id {} does not match expected id {}", chunk_info.message_header.secure_channel_id, secure_channel_id); + if secure_channel_id != 0 + && chunk_info.message_header.secure_channel_id != secure_channel_id + { + error!( + "Secure channel id {} does not match expected id {}", + chunk_info.message_header.secure_channel_id, secure_channel_id + ); return Err(StatusCode::BadSecureChannelIdInvalid); } @@ -67,7 +80,10 @@ impl Chunker { let sequence_number = chunk_info.sequence_header.sequence_number; let expected_sequence_number = first_sequence_number + i as u32; if sequence_number != expected_sequence_number { - error!("Chunk sequence number of {} is not the expected value of {}, idx {}", sequence_number, expected_sequence_number, i); + error!( + "Chunk sequence number of {} is not the expected value of {}, idx {}", + sequence_number, expected_sequence_number, i + ); return Err(StatusCode::BadSecurityChecksFailed); } @@ -88,7 +104,14 @@ impl Chunker { /// max_chunk_size refers to the maximum byte length that a chunk should not exceed or 0 for no limit /// max_message_size refers to the maximum byte length of a message or 0 for no limit /// - pub fn encode(sequence_number: u32, request_id: u32, max_message_size: usize, max_chunk_size: usize, secure_channel: &SecureChannel, supported_message: &SupportedMessage) -> std::result::Result, StatusCode> { + pub fn encode( + sequence_number: u32, + request_id: u32, + max_message_size: usize, + max_chunk_size: usize, + secure_channel: &SecureChannel, + supported_message: &SupportedMessage, + ) -> std::result::Result, StatusCode> { let security_policy = secure_channel.security_policy(); if security_policy == SecurityPolicy::Unknown { panic!("Security policy cannot be unknown"); @@ -98,9 +121,16 @@ impl Chunker { // here makes as good a place as any to do that. 
let mut message_size = supported_message.byte_len(); if max_message_size > 0 && message_size > max_message_size { - error!("Max message size is {} and message {} exceeds that", max_message_size, message_size); + error!( + "Max message size is {} and message {} exceeds that", + max_message_size, message_size + ); // Client stack should report a BadRequestTooLarge, server BadResponseTooLarge - Err(if secure_channel.is_client_role() { StatusCode::BadRequestTooLarge } else { StatusCode::BadResponseTooLarge }) + Err(if secure_channel.is_client_role() { + StatusCode::BadRequestTooLarge + } else { + StatusCode::BadResponseTooLarge + }) } else { let node_id = supported_message.node_id(); message_size += node_id.byte_len(); @@ -114,11 +144,18 @@ impl Chunker { let data = stream.into_inner(); let result = if max_chunk_size > 0 { - let max_body_per_chunk = MessageChunk::body_size_from_message_size(message_type, secure_channel, max_chunk_size) - .map_err(|_| { - error!("body_size_from_message_size error for max_chunk_size = {}", max_chunk_size); - StatusCode::BadTcpInternalError - })?; + let max_body_per_chunk = MessageChunk::body_size_from_message_size( + message_type, + secure_channel, + max_chunk_size, + ) + .map_err(|_| { + error!( + "body_size_from_message_size error for max_chunk_size = {}", + max_chunk_size + ); + StatusCode::BadTcpInternalError + })?; // Multiple chunks means breaking the data up into sections. Fortunately // Rust has a nice function to do just that. @@ -131,12 +168,26 @@ impl Chunker { } else { MessageIsFinalType::Intermediate }; - let chunk = MessageChunk::new(sequence_number + i as u32, request_id, message_type, is_final, secure_channel, data_chunk)?; + let chunk = MessageChunk::new( + sequence_number + i as u32, + request_id, + message_type, + is_final, + secure_channel, + data_chunk, + )?; chunks.push(chunk); } chunks } else { - let chunk = MessageChunk::new(sequence_number, request_id, message_type, MessageIsFinalType::Final, secure_channel, &data)?; + let chunk = MessageChunk::new( + sequence_number, + request_id, + message_type, + MessageIsFinalType::Final, + secure_channel, + &data, + )?; vec![chunk] }; Ok(result) @@ -145,7 +196,11 @@ impl Chunker { /// Decodes a series of chunks to create a message. The message must be of a `SupportedMessage` /// type otherwise an error will occur. 
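// Hedged round-trip sketch pairing `Chunker::encode` (above) with `Chunker::decode`
// (below); the sequence number, request id and the zero "no limit" sizes are
// arbitrary values chosen for illustration:
//
fn round_trip(
    secure_channel: &SecureChannel,
    message: &SupportedMessage,
) -> Result<SupportedMessage, StatusCode> {
    // 0 for max_message_size and max_chunk_size means no limit, so one chunk results.
    let chunks = Chunker::encode(1, 1, 0, 0, secure_channel, message)?;
    // Passing None skips the expected node id check on the decoded message.
    Chunker::decode(&chunks, secure_channel, None)
}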
- pub fn decode(chunks: &[MessageChunk], secure_channel: &SecureChannel, expected_node_id: Option) -> std::result::Result { + pub fn decode( + chunks: &[MessageChunk], + secure_channel: &SecureChannel, + expected_node_id: Option, + ) -> std::result::Result { // Calculate the size of data held in all chunks let mut data_size: usize = 0; for (i, chunk) in chunks.iter().enumerate() { @@ -208,7 +263,10 @@ impl Chunker { } } - fn object_id_from_node_id(node_id: NodeId, expected_node_id: Option) -> Result { + fn object_id_from_node_id( + node_id: NodeId, + expected_node_id: Option, + ) -> Result { let valid_node_id = if node_id.namespace != 0 || !node_id.is_numeric() { // Must be ns 0 and numeric error!("Expecting chunk to contain a OPC UA request or response"); @@ -216,23 +274,32 @@ impl Chunker { } else if let Some(expected_node_id) = expected_node_id { let matches_expected = expected_node_id == node_id; if !matches_expected { - error!("Chunk node id {:?} does not match expected {:?}", node_id, expected_node_id); + error!( + "Chunk node id {:?} does not match expected {:?}", + node_id, expected_node_id + ); } matches_expected } else { true }; if !valid_node_id { - error!("The node id read from the stream was not accepted in this context {:?}", node_id); + error!( + "The node id read from the stream was not accepted in this context {:?}", + node_id + ); Err(StatusCode::BadUnexpectedError) } else { - node_id.as_object_id().map_err(|_| { - error!("The node {:?} was not an object id", node_id); - StatusCode::BadUnexpectedError - }).map(|object_id| { - trace!("Decoded node id / object id of {:?}", object_id); - object_id - }) + node_id + .as_object_id() + .map_err(|_| { + error!("The node {:?} was not an object id", node_id); + StatusCode::BadUnexpectedError + }) + .map(|object_id| { + trace!("Decoded node id / object id of {:?}", object_id); + object_id + }) } } -} \ No newline at end of file +} diff --git a/core/src/comms/message_chunk.rs b/core/src/comms/message_chunk.rs index fe70add7f..a94014e7b 100644 --- a/core/src/comms/message_chunk.rs +++ b/core/src/comms/message_chunk.rs @@ -8,19 +8,17 @@ use std; use std::io::{Cursor, Read, Write}; -use opcua_types::{ - *, - status_code::StatusCode, -}; +use opcua_types::{status_code::StatusCode, *}; use crate::comms::{ message_chunk_info::ChunkInfo, secure_channel::SecureChannel, - security_header::{AsymmetricSecurityHeader, SecurityHeader, SequenceHeader, SymmetricSecurityHeader}, + security_header::{ + AsymmetricSecurityHeader, SecurityHeader, SequenceHeader, SymmetricSecurityHeader, + }, tcp_types::{ - CHUNK_FINAL, CHUNK_FINAL_ERROR, CHUNK_INTERMEDIATE, - CHUNK_MESSAGE, CLOSE_SECURE_CHANNEL_MESSAGE, MIN_CHUNK_SIZE, - OPEN_SECURE_CHANNEL_MESSAGE, + CHUNK_FINAL, CHUNK_FINAL_ERROR, CHUNK_INTERMEDIATE, CHUNK_MESSAGE, + CLOSE_SECURE_CHANNEL_MESSAGE, MIN_CHUNK_SIZE, OPEN_SECURE_CHANNEL_MESSAGE, }, }; @@ -105,9 +103,9 @@ impl BinaryEncoder for MessageChunkHeader { let chunk_type_code = read_u8(stream)?; let is_final = match chunk_type_code { - CHUNK_FINAL => { MessageIsFinalType::Final } - CHUNK_INTERMEDIATE => { MessageIsFinalType::Intermediate } - CHUNK_FINAL_ERROR => { MessageIsFinalType::FinalError } + CHUNK_FINAL => MessageIsFinalType::Final, + CHUNK_INTERMEDIATE => MessageIsFinalType::Intermediate, + CHUNK_FINAL_ERROR => MessageIsFinalType::FinalError, _ => { error!("Invalid chunk type"); return Err(StatusCode::BadDecodingError); @@ -143,17 +141,19 @@ impl BinaryEncoder for MessageChunk { } fn encode(&self, stream: &mut S) -> EncodingResult { - 
stream.write(&self.data) - .map_err(|_| { - error!("Encoding error while writing to stream"); - StatusCode::BadEncodingError - }) + stream.write(&self.data).map_err(|_| { + error!("Encoding error while writing to stream"); + StatusCode::BadEncodingError + }) } - fn decode(in_stream: &mut S, decoding_limits: &DecodingLimits) -> EncodingResult { + fn decode( + in_stream: &mut S, + decoding_limits: &DecodingLimits, + ) -> EncodingResult { // Read the header out first - let chunk_header = MessageChunkHeader::decode(in_stream, decoding_limits) - .map_err(|err| { + let chunk_header = + MessageChunkHeader::decode(in_stream, decoding_limits).map_err(|err| { error!("Cannot decode chunk header {:?}", err); StatusCode::BadCommunicationError })?; @@ -183,10 +183,20 @@ impl BinaryEncoder for MessageChunk { } impl MessageChunk { - pub fn new(sequence_number: u32, request_id: u32, message_type: MessageChunkType, is_final: MessageIsFinalType, secure_channel: &SecureChannel, data: &[u8]) -> Result { + pub fn new( + sequence_number: u32, + request_id: u32, + message_type: MessageChunkType, + is_final: MessageIsFinalType, + secure_channel: &SecureChannel, + data: &[u8], + ) -> Result { // security header depends on message type let security_header = secure_channel.make_security_header(message_type); - let sequence_header = SequenceHeader { sequence_number, request_id }; + let sequence_header = SequenceHeader { + sequence_number, + request_id, + }; // Calculate the chunk body size let mut message_size = MESSAGE_CHUNK_HEADER_SIZE; @@ -194,7 +204,10 @@ impl MessageChunk { message_size += sequence_header.byte_len(); message_size += data.len(); - trace!("Creating a chunk with a size of {}, data excluding padding & signature", message_size); + trace!( + "Creating a chunk with a size of {}, data excluding padding & signature", + message_size + ); let secure_channel_id = secure_channel.secure_channel_id(); let chunk_header = MessageChunkHeader { message_type, @@ -213,22 +226,35 @@ impl MessageChunk { // write message let _ = stream.write(data); - Ok(MessageChunk { data: stream.into_inner() }) + Ok(MessageChunk { + data: stream.into_inner(), + }) } /// Calculates the body size that fit inside of a message chunk of a particular size. /// This requires calculating the size of the header, the signature, padding etc. 
and deducting it /// to reveal the message size - pub fn body_size_from_message_size(message_type: MessageChunkType, secure_channel: &SecureChannel, message_size: usize) -> Result { + pub fn body_size_from_message_size( + message_type: MessageChunkType, + secure_channel: &SecureChannel, + message_size: usize, + ) -> Result { if message_size < MIN_CHUNK_SIZE { - error!("message size {} is less than minimum allowed by the spec", message_size); + error!( + "message size {} is less than minimum allowed by the spec", + message_size + ); Err(()) } else { let security_header = secure_channel.make_security_header(message_type); let mut data_size = MESSAGE_CHUNK_HEADER_SIZE; data_size += security_header.byte_len(); - data_size += (SequenceHeader { sequence_number: 0, request_id: 0 }).byte_len(); + data_size += (SequenceHeader { + sequence_number: 0, + request_id: 0, + }) + .byte_len(); // 1 byte == most padding let signature_size = secure_channel.signature_size(&security_header); @@ -242,20 +268,33 @@ impl MessageChunk { } } - pub fn message_header(&self, decoding_limits: &DecodingLimits) -> Result { + pub fn message_header( + &self, + decoding_limits: &DecodingLimits, + ) -> Result { // Message header is first so just read it let mut stream = Cursor::new(&self.data); MessageChunkHeader::decode(&mut stream, decoding_limits) } - pub fn security_header(&self, decoding_limits: &DecodingLimits) -> Result { + pub fn security_header( + &self, + decoding_limits: &DecodingLimits, + ) -> Result { // Message header is first so just read it let mut stream = Cursor::new(&self.data); let message_header = MessageChunkHeader::decode(&mut stream, decoding_limits)?; - let security_header = if message_header.message_type == MessageChunkType::OpenSecureChannel { - SecurityHeader::Asymmetric(AsymmetricSecurityHeader::decode(&mut stream, decoding_limits)?) + let security_header = if message_header.message_type == MessageChunkType::OpenSecureChannel + { + SecurityHeader::Asymmetric(AsymmetricSecurityHeader::decode( + &mut stream, + decoding_limits, + )?) } else { - SecurityHeader::Symmetric(SymmetricSecurityHeader::decode(&mut stream, decoding_limits)?) + SecurityHeader::Symmetric(SymmetricSecurityHeader::decode( + &mut stream, + decoding_limits, + )?) }; Ok(security_header) } @@ -268,7 +307,10 @@ impl MessageChunk { } } - pub fn chunk_info(&self, secure_channel: &SecureChannel) -> std::result::Result { + pub fn chunk_info( + &self, + secure_channel: &SecureChannel, + ) -> std::result::Result { ChunkInfo::new(self, secure_channel) } } diff --git a/core/src/comms/message_chunk_info.rs b/core/src/comms/message_chunk_info.rs index 41ecf5cee..bec89d262 100644 --- a/core/src/comms/message_chunk_info.rs +++ b/core/src/comms/message_chunk_info.rs @@ -6,17 +6,19 @@ use std; use std::io::Cursor; use opcua_crypto::SecurityPolicy; -use opcua_types::BinaryEncoder; use opcua_types::status_code::StatusCode; +use opcua_types::BinaryEncoder; use crate::comms::{ message_chunk::{MessageChunk, MessageChunkHeader}, secure_channel::SecureChannel, - security_header::{AsymmetricSecurityHeader, SecurityHeader, SequenceHeader, SymmetricSecurityHeader}, + security_header::{ + AsymmetricSecurityHeader, SecurityHeader, SequenceHeader, SymmetricSecurityHeader, + }, }; /// Chunk info provides some basic information gleaned from reading the chunk such as offsets into -/// the chunk and so on. The chunk MUST be decrypted before calling this otherwise the values are +/// the chunk and so on. 
The chunk MUST be decrypted before calling this otherwise the values are /// garbage. #[derive(Debug, Clone, PartialEq)] pub struct ChunkInfo { @@ -36,7 +38,10 @@ pub struct ChunkInfo { } impl ChunkInfo { - pub fn new(chunk: &MessageChunk, secure_channel: &SecureChannel) -> std::result::Result { + pub fn new( + chunk: &MessageChunk, + secure_channel: &SecureChannel, + ) -> std::result::Result { let mut stream = Cursor::new(&chunk.data); let decoding_limits = secure_channel.decoding_limits(); @@ -48,9 +53,12 @@ impl ChunkInfo { let security_header = if chunk.is_open_secure_channel(&decoding_limits) { let security_header = AsymmetricSecurityHeader::decode(&mut stream, &decoding_limits) .map_err(|err| { - error!("chunk_info() cannot decode asymmetric security_header, {:?}", err); - StatusCode::BadCommunicationError - })?; + error!( + "chunk_info() cannot decode asymmetric security_header, {:?}", + err + ); + StatusCode::BadCommunicationError + })?; let security_policy = if security_header.security_policy_uri.is_null() { SecurityPolicy::None @@ -59,7 +67,10 @@ impl ChunkInfo { }; if security_policy == SecurityPolicy::Unknown { - error!("Security policy of chunk is unsupported, policy = {:?}", security_header.security_policy_uri); + error!( + "Security policy of chunk is unsupported, policy = {:?}", + security_header.security_policy_uri + ); return Err(StatusCode::BadSecurityPolicyRejected); } @@ -68,15 +79,18 @@ impl ChunkInfo { } else { let security_header = SymmetricSecurityHeader::decode(&mut stream, &decoding_limits) .map_err(|err| { - error!("chunk_info() cannot decode symmetric security_header, {:?}", err); + error!( + "chunk_info() cannot decode symmetric security_header, {:?}", + err + ); StatusCode::BadCommunicationError })?; SecurityHeader::Symmetric(security_header) }; let sequence_header_offset = stream.position() as usize; - let sequence_header = SequenceHeader::decode(&mut stream, &decoding_limits) - .map_err(|err| { + let sequence_header = + SequenceHeader::decode(&mut stream, &decoding_limits).map_err(|err| { error!("Cannot decode sequence header {:?}", err); StatusCode::BadCommunicationError })?; diff --git a/core/src/comms/message_writer.rs b/core/src/comms/message_writer.rs index 3cb8fd65c..a645b2b49 100644 --- a/core/src/comms/message_writer.rs +++ b/core/src/comms/message_writer.rs @@ -1,106 +1,112 @@ -// OPCUA for Rust -// SPDX-License-Identifier: MPL-2.0 -// Copyright (C) 2017-2020 Adam Lock - -use std::io::{Cursor, Write}; - -use opcua_types::{ - BinaryEncoder, EncodingResult, - status_code::StatusCode, -}; - -use crate::{ - comms::{ - chunker::Chunker, secure_channel::SecureChannel, - tcp_types::AcknowledgeMessage, - }, supported_message::SupportedMessage, -}; - -const DEFAULT_REQUEST_ID: u32 = 1000; -const DEFAULT_SENT_SEQUENCE_NUMBER: u32 = 0; - -/// SocketWriter is a wrapper around the writable half of a tokio stream and a buffer which -/// will be dumped into that stream. 
-pub struct MessageWriter { - /// The send buffer - buffer: Cursor>, - /// The last request id - last_request_id: u32, - /// Last sent sequence number - last_sent_sequence_number: u32, -} - -impl MessageWriter { - pub fn new(buffer_size: usize) -> MessageWriter { - MessageWriter { - buffer: Cursor::new(vec![0u8; buffer_size]), - last_request_id: DEFAULT_REQUEST_ID, - last_sent_sequence_number: DEFAULT_SENT_SEQUENCE_NUMBER, - } - } - - pub fn write_ack(&mut self, ack: &AcknowledgeMessage) -> EncodingResult { - ack.encode(&mut self.buffer) - } - - /// Encodes the message into a series of chunks, encrypts those chunks and writes the - /// result into the buffer ready to be sent. - pub fn write(&mut self, request_id: u32, message: SupportedMessage, secure_channel: &SecureChannel) -> Result { - trace!("Writing request to buffer"); - // Turn message to chunk(s) - // TODO max message size and max chunk size - let chunks = Chunker::encode( - self.last_sent_sequence_number + 1, request_id, - 0, 0, secure_channel, &message)?; - - // Sequence number monotonically increases per chunk - self.last_sent_sequence_number += chunks.len() as u32; - - // Send chunks - - // This max chunk size allows the message to be encoded to a chunk with header + encoding - // which is just slightly larger in size (up to 1024 bytes). - let max_chunk_size = self.buffer.get_ref().len() + 1024; - let mut data = vec![0u8; max_chunk_size]; - - let decoding_limits = secure_channel.decoding_limits(); - for chunk in chunks { - trace!("Sending chunk of type {:?}", chunk.message_header(&decoding_limits)?.message_type); - let size = { - secure_channel.apply_security(&chunk, &mut data) - }; - match size { - Ok(size) => { - if let Err(error) = self.buffer.write(&data[..size]) { - error!("Error while writing bytes to stream, connection broken, check error {:?}", error); - break; - } - } - Err(err) => { - panic!("Applying security to chunk failed - {:?}", err); - } - } - } - trace!("Message written"); - Ok(request_id) - } - - pub fn next_request_id(&mut self) -> u32 { - self.last_request_id += 1; - self.last_request_id - } - - /// Clears the buffer - fn clear(&mut self) { - self.buffer.set_position(0); - } - - /// Yields any results to write, resetting the buffer back afterwards - pub fn bytes_to_write(&mut self) -> Vec { - let pos = self.buffer.position() as usize; - let result = (self.buffer.get_ref())[0..pos].to_vec(); - // Buffer MUST be cleared here, otherwise races are possible - self.clear(); - result - } -} \ No newline at end of file +// OPCUA for Rust +// SPDX-License-Identifier: MPL-2.0 +// Copyright (C) 2017-2020 Adam Lock + +use std::io::{Cursor, Write}; + +use opcua_types::{status_code::StatusCode, BinaryEncoder, EncodingResult}; + +use crate::{ + comms::{chunker::Chunker, secure_channel::SecureChannel, tcp_types::AcknowledgeMessage}, + supported_message::SupportedMessage, +}; + +const DEFAULT_REQUEST_ID: u32 = 1000; +const DEFAULT_SENT_SEQUENCE_NUMBER: u32 = 0; + +/// SocketWriter is a wrapper around the writable half of a tokio stream and a buffer which +/// will be dumped into that stream. 
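// Hedged usage sketch of the writer defined below, assuming `secure_channel` and a
// `message: SupportedMessage` are in scope; 8192 is an arbitrary buffer size:
//
let mut writer = MessageWriter::new(8192);
let request_id = writer.next_request_id();
// Chunk the message, apply security and buffer the result...
writer.write(request_id, message, &secure_channel)?;
// ...then take the buffered bytes for sending (this also resets the buffer).
let bytes_to_send = writer.bytes_to_write();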
+pub struct MessageWriter { + /// The send buffer + buffer: Cursor>, + /// The last request id + last_request_id: u32, + /// Last sent sequence number + last_sent_sequence_number: u32, +} + +impl MessageWriter { + pub fn new(buffer_size: usize) -> MessageWriter { + MessageWriter { + buffer: Cursor::new(vec![0u8; buffer_size]), + last_request_id: DEFAULT_REQUEST_ID, + last_sent_sequence_number: DEFAULT_SENT_SEQUENCE_NUMBER, + } + } + + pub fn write_ack(&mut self, ack: &AcknowledgeMessage) -> EncodingResult { + ack.encode(&mut self.buffer) + } + + /// Encodes the message into a series of chunks, encrypts those chunks and writes the + /// result into the buffer ready to be sent. + pub fn write( + &mut self, + request_id: u32, + message: SupportedMessage, + secure_channel: &SecureChannel, + ) -> Result { + trace!("Writing request to buffer"); + // Turn message to chunk(s) + // TODO max message size and max chunk size + let chunks = Chunker::encode( + self.last_sent_sequence_number + 1, + request_id, + 0, + 0, + secure_channel, + &message, + )?; + + // Sequence number monotonically increases per chunk + self.last_sent_sequence_number += chunks.len() as u32; + + // Send chunks + + // This max chunk size allows the message to be encoded to a chunk with header + encoding + // which is just slightly larger in size (up to 1024 bytes). + let max_chunk_size = self.buffer.get_ref().len() + 1024; + let mut data = vec![0u8; max_chunk_size]; + + let decoding_limits = secure_channel.decoding_limits(); + for chunk in chunks { + trace!( + "Sending chunk of type {:?}", + chunk.message_header(&decoding_limits)?.message_type + ); + let size = { secure_channel.apply_security(&chunk, &mut data) }; + match size { + Ok(size) => { + if let Err(error) = self.buffer.write(&data[..size]) { + error!("Error while writing bytes to stream, connection broken, check error {:?}", error); + break; + } + } + Err(err) => { + panic!("Applying security to chunk failed - {:?}", err); + } + } + } + trace!("Message written"); + Ok(request_id) + } + + pub fn next_request_id(&mut self) -> u32 { + self.last_request_id += 1; + self.last_request_id + } + + /// Clears the buffer + fn clear(&mut self) { + self.buffer.set_position(0); + } + + /// Yields any results to write, resetting the buffer back afterwards + pub fn bytes_to_write(&mut self) -> Vec { + let pos = self.buffer.position() as usize; + let result = (self.buffer.get_ref())[0..pos].to_vec(); + // Buffer MUST be cleared here, otherwise races are possible + self.clear(); + result + } +} diff --git a/core/src/comms/mod.rs b/core/src/comms/mod.rs index 62cf70c8b..204db1de3 100644 --- a/core/src/comms/mod.rs +++ b/core/src/comms/mod.rs @@ -8,13 +8,13 @@ pub mod chunker; pub mod message_chunk; pub mod message_chunk_info; +pub mod message_writer; pub mod secure_channel; pub mod security_header; -pub mod message_writer; pub mod tcp_codec; -pub mod wrapped_tcp_stream; pub mod tcp_types; pub mod url; +pub mod wrapped_tcp_stream; pub mod prelude { pub use super::chunker::*; diff --git a/core/src/comms/secure_channel.rs b/core/src/comms/secure_channel.rs index c9990c636..033777f68 100644 --- a/core/src/comms/secure_channel.rs +++ b/core/src/comms/secure_channel.rs @@ -10,15 +10,14 @@ use chrono; use opcua_crypto::{ aeskey::AesKey, - CertificateStore, pkey::{KeySize, PrivateKey, PublicKey}, random, - SecurityPolicy, x509::X509, + CertificateStore, SecurityPolicy, }; -use opcua_types::*; use opcua_types::service_types::ChannelSecurityToken; use opcua_types::status_code::StatusCode; +use 
opcua_types::*; use crate::comms::{ message_chunk::{MessageChunk, MessageChunkHeader, MessageChunkType}, @@ -96,7 +95,11 @@ impl SecureChannel { (SecurityPolicy::None, MessageSecurityMode::None).into() } - pub fn new(certificate_store: Arc>, role: Role, decoding_limits: DecodingLimits) -> SecureChannel { + pub fn new( + certificate_store: Arc>, + role: Role, + decoding_limits: DecodingLimits, + ) -> SecureChannel { let (cert, private_key) = { let certificate_store = certificate_store.read().unwrap(); if let Ok((cert, pkey)) = certificate_store.read_own_cert_and_pkey() { @@ -235,27 +238,36 @@ impl SecureChannel { trace!("AsymmetricSecurityHeader security policy none"); AsymmetricSecurityHeader::none() } else { - let receiver_certificate_thumbprint = if let Some(ref remote_cert) = self.remote_cert { - remote_cert.thumbprint().as_byte_string() - } else { - ByteString::null() - }; - AsymmetricSecurityHeader::new(self.security_policy, self.cert.as_ref().unwrap(), receiver_certificate_thumbprint) + let receiver_certificate_thumbprint = + if let Some(ref remote_cert) = self.remote_cert { + remote_cert.thumbprint().as_byte_string() + } else { + ByteString::null() + }; + AsymmetricSecurityHeader::new( + self.security_policy, + self.cert.as_ref().unwrap(), + receiver_certificate_thumbprint, + ) }; - debug!("AsymmetricSecurityHeader = {:?}", asymmetric_security_header); + debug!( + "AsymmetricSecurityHeader = {:?}", + asymmetric_security_header + ); SecurityHeader::Asymmetric(asymmetric_security_header) } - _ => { - SecurityHeader::Symmetric(SymmetricSecurityHeader { - token_id: self.token_id, - }) - } + _ => SecurityHeader::Symmetric(SymmetricSecurityHeader { + token_id: self.token_id, + }), } } /// Creates a nonce for the connection. The nonce should be the same size as the symmetric key pub fn create_random_nonce(&mut self) { - if self.security_policy != SecurityPolicy::None && (self.security_mode == MessageSecurityMode::Sign || self.security_mode == MessageSecurityMode::SignAndEncrypt) { + if self.security_policy != SecurityPolicy::None + && (self.security_mode == MessageSecurityMode::Sign + || self.security_mode == MessageSecurityMode::SignAndEncrypt) + { self.local_nonce = vec![0u8; self.security_policy.secure_channel_nonce_length()]; random::bytes(&mut self.local_nonce); } else { @@ -265,7 +277,10 @@ impl SecureChannel { } /// Sets the remote certificate - pub fn set_remote_cert_from_byte_string(&mut self, remote_cert: &ByteString) -> Result<(), StatusCode> { + pub fn set_remote_cert_from_byte_string( + &mut self, + remote_cert: &ByteString, + ) -> Result<(), StatusCode> { self.remote_cert = if remote_cert.is_null() { None } else { @@ -284,11 +299,22 @@ impl SecureChannel { } /// Set their nonce which should be the same as the symmetric key - pub fn set_remote_nonce_from_byte_string(&mut self, remote_nonce: &ByteString) -> Result<(), StatusCode> { - if self.security_policy != SecurityPolicy::None && (self.security_mode == MessageSecurityMode::Sign || self.security_mode == MessageSecurityMode::SignAndEncrypt) { + pub fn set_remote_nonce_from_byte_string( + &mut self, + remote_nonce: &ByteString, + ) -> Result<(), StatusCode> { + if self.security_policy != SecurityPolicy::None + && (self.security_mode == MessageSecurityMode::Sign + || self.security_mode == MessageSecurityMode::SignAndEncrypt) + { if let Some(ref remote_nonce) = remote_nonce.value { if remote_nonce.len() != self.security_policy.secure_channel_nonce_length() { - error!("Remote nonce is invalid length {}, expecting {}. 
{:?}", remote_nonce.len(), self.security_policy.secure_channel_nonce_length(), remote_nonce); + error!( + "Remote nonce is invalid length {}, expecting {}. {:?}", + remote_nonce.len(), + self.security_policy.secure_channel_nonce_length(), + remote_nonce + ); Err(StatusCode::BadNonceInvalid) } else { self.remote_nonce = remote_nonce.to_vec(); @@ -299,7 +325,11 @@ impl SecureChannel { Err(StatusCode::BadNonceInvalid) } } else { - trace!("set_remote_nonce is doing nothing because security policy = {:?}, mode = {:?}", self.security_policy, self.security_mode); + trace!( + "set_remote_nonce is doing nothing because security policy = {:?}, mode = {:?}", + self.security_policy, + self.security_mode + ); Ok(()) } } @@ -338,8 +368,14 @@ impl SecureChannel { /// are used to secure Messages sent by the Server. /// pub fn derive_keys(&mut self) { - self.remote_keys = Some(self.security_policy.make_secure_channel_keys(&self.local_nonce, &self.remote_nonce)); - self.local_keys = Some(self.security_policy.make_secure_channel_keys(&self.remote_nonce, &self.local_nonce)); + self.remote_keys = Some( + self.security_policy + .make_secure_channel_keys(&self.local_nonce, &self.remote_nonce), + ); + self.local_keys = Some( + self.security_policy + .make_secure_channel_keys(&self.remote_nonce, &self.local_nonce), + ); trace!("Remote nonce = {:?}", self.remote_nonce); trace!("Local nonce = {:?}", self.local_nonce); trace!("Derived remote keys = {:?}", self.remote_keys); @@ -350,7 +386,8 @@ impl SecureChannel { pub fn token_has_expired(&self) -> bool { let now: chrono::DateTime = DateTime::now().into(); let token_created_at: chrono::DateTime = self.token_created_at.clone().into(); - let token_expires = token_created_at + chrono::Duration::seconds(self.token_lifetime as i64); + let token_expires = + token_created_at + chrono::Duration::seconds(self.token_lifetime as i64); now.ge(&token_expires) } @@ -376,14 +413,25 @@ impl SecureChannel { // Extra padding required for keysize > 2048 bits (256 bytes) fn minimum_padding(signature_size: usize) -> usize { - if signature_size <= 256 { 1 } else { 2 } + if signature_size <= 256 { + 1 + } else { + 2 + } } /// Calculate the padding size /// /// Padding adds bytes to the body to make it a multiple of the block size so it can be encrypted. - pub fn padding_size(&self, security_header: &SecurityHeader, body_size: usize, signature_size: usize) -> usize { - if self.security_policy != SecurityPolicy::None && self.security_mode != MessageSecurityMode::None { + pub fn padding_size( + &self, + security_header: &SecurityHeader, + body_size: usize, + signature_size: usize, + ) -> usize { + if self.security_policy != SecurityPolicy::None + && self.security_mode != MessageSecurityMode::None + { // Signature size in bytes let plain_text_block_size = match security_header { SecurityHeader::Asymmetric(security_header) => { @@ -393,7 +441,8 @@ impl SecureChannel { } else { // Padding requires we look at the sending key and security policy let padding = self.security_policy.asymmetric_encryption_padding(); - let x509 = X509::from_byte_string(&security_header.sender_certificate).unwrap(); + let x509 = + X509::from_byte_string(&security_header.sender_certificate).unwrap(); x509.public_key().unwrap().plain_text_block_size(padding) } } @@ -420,7 +469,10 @@ impl SecureChannel { // Takes an unpadded message chunk and adds padding as well as space to the end to accomodate a signature. 
// Also modifies the message size to include the new padding/signature - fn add_space_for_padding_and_signature(&self, message_chunk: &MessageChunk) -> Result, StatusCode> { + fn add_space_for_padding_and_signature( + &self, + message_chunk: &MessageChunk, + ) -> Result, StatusCode> { let chunk_info = message_chunk.chunk_info(self)?; let data = &message_chunk.data[..]; @@ -451,7 +503,11 @@ impl SecureChannel { // Padding and then extra padding let padding_byte = ((padding_size - 2) & 0xff) as u8; let extra_padding_byte = ((padding_size - 2) >> 8) as u8; - trace!("adding extra padding - padding_byte = {}, extra_padding_byte = {}", padding_byte, extra_padding_byte); + trace!( + "adding extra padding - padding_byte = {}, extra_padding_byte = {}", + padding_byte, + extra_padding_byte + ); let _ = write_bytes(&mut stream, padding_byte, padding_size - 1)?; write_u8(&mut stream, extra_padding_byte)?; } @@ -462,10 +518,18 @@ impl SecureChannel { // Update message header to reflect size with padding + signature let message_size = data.len() + padding_size + signature_size; - Self::update_message_size_and_truncate(stream.into_inner(), message_size, &self.decoding_limits) + Self::update_message_size_and_truncate( + stream.into_inner(), + message_size, + &self.decoding_limits, + ) } - fn update_message_size(data: &mut [u8], message_size: usize, decoding_limits: &DecodingLimits) -> Result<(), StatusCode> { + fn update_message_size( + data: &mut [u8], + message_size: usize, + decoding_limits: &DecodingLimits, + ) -> Result<(), StatusCode> { // Read and rewrite the message_size in the header let mut stream = Cursor::new(data); let mut message_header = MessageChunkHeader::decode(&mut stream, &decoding_limits)?; @@ -473,12 +537,20 @@ impl SecureChannel { let old_message_size = message_header.message_size; message_header.message_size = message_size as u32; message_header.encode(&mut stream)?; - trace!("Message header message size being modified from {} to {}", old_message_size, message_size); + trace!( + "Message header message size being modified from {} to {}", + old_message_size, + message_size + ); Ok(()) } // Truncates a vec and writes the message size - pub fn update_message_size_and_truncate(mut data: Vec, message_size: usize, decoding_limits: &DecodingLimits) -> Result, StatusCode> { + pub fn update_message_size_and_truncate( + mut data: Vec, + message_size: usize, + decoding_limits: &DecodingLimits, + ) -> Result, StatusCode> { Self::update_message_size(&mut data[..], message_size, decoding_limits)?; // Truncate vector to the size data.truncate(message_size); @@ -491,8 +563,15 @@ impl SecureChannel { } /// Applies security to a message chunk and yields a encrypted/signed block to be streamed - pub fn apply_security(&self, message_chunk: &MessageChunk, dst: &mut [u8]) -> Result { - let size = if self.security_policy != SecurityPolicy::None && (self.security_mode == MessageSecurityMode::Sign || self.security_mode == MessageSecurityMode::SignAndEncrypt) { + pub fn apply_security( + &self, + message_chunk: &MessageChunk, + dst: &mut [u8], + ) -> Result { + let size = if self.security_policy != SecurityPolicy::None + && (self.security_mode == MessageSecurityMode::Sign + || self.security_mode == MessageSecurityMode::SignAndEncrypt) + { let chunk_info = message_chunk.chunk_info(self)?; // S - Message Header @@ -514,7 +593,8 @@ impl SecureChannel { self.asymmetric_sign_and_encrypt(self.security_policy, &data, encrypted_range, dst)? 
} else { // Symmetric encrypt and sign - let signed_range = 0..(data.len() - self.security_policy.symmetric_signature_size()); + let signed_range = + 0..(data.len() - self.security_policy.symmetric_signature_size()); self.symmetric_sign_and_encrypt(&data, signed_range, encrypted_range, dst)? }; @@ -541,15 +621,25 @@ impl SecureChannel { /// /// Note, that normally we do not have "their" key but for testing purposes and forensics, we /// might have the key - pub fn verify_and_remove_security_forensic(&mut self, src: &[u8], their_key: Option) -> Result { + pub fn verify_and_remove_security_forensic( + &mut self, + src: &[u8], + their_key: Option, + ) -> Result { // Get message & security header from data let (message_header, security_header, encrypted_data_offset) = { let mut stream = Cursor::new(&src); let message_header = MessageChunkHeader::decode(&mut stream, &self.decoding_limits)?; let security_header = if message_header.message_type.is_open_secure_channel() { - SecurityHeader::Asymmetric(AsymmetricSecurityHeader::decode(&mut stream, &self.decoding_limits)?) + SecurityHeader::Asymmetric(AsymmetricSecurityHeader::decode( + &mut stream, + &self.decoding_limits, + )?) } else { - SecurityHeader::Symmetric(SymmetricSecurityHeader::decode(&mut stream, &self.decoding_limits)?) + SecurityHeader::Symmetric(SymmetricSecurityHeader::decode( + &mut stream, + &self.decoding_limits, + )?) }; let encrypted_data_offset = stream.position() as usize; (message_header, security_header, encrypted_data_offset) @@ -557,7 +647,11 @@ impl SecureChannel { let message_size = message_header.message_size as usize; if message_size != src.len() { - error!("The message size {} is not the same as the supplied buffer {}", message_size, src.len()); + error!( + "The message size {} is not the same as the supplied buffer {}", + message_size, + src.len() + ); return Err(StatusCode::BadUnexpectedError); } @@ -611,8 +705,16 @@ impl SecureChannel { // TODO return } - let sender_certificate_len = security_header.sender_certificate.value.as_ref().unwrap().len(); - trace!("Sender certificate byte length = {}", sender_certificate_len); + let sender_certificate_len = security_header + .sender_certificate + .value + .as_ref() + .unwrap() + .len(); + trace!( + "Sender certificate byte length = {}", + sender_certificate_len + ); let sender_certificate = X509::from_byte_string(&security_header.sender_certificate)?; let verification_key = sender_certificate.public_key()?; @@ -620,21 +722,49 @@ impl SecureChannel { trace!("Receiver thumbprint = {:?}", receiver_thumbprint); let mut decrypted_data = vec![0u8; message_size]; - let decrypted_size = self.asymmetric_decrypt_and_verify(security_policy, &verification_key, receiver_thumbprint, src, encrypted_range, their_key, &mut decrypted_data)?; - - Self::update_message_size_and_truncate(decrypted_data, decrypted_size, &self.decoding_limits)? - } else if self.security_policy != SecurityPolicy::None && (self.security_mode == MessageSecurityMode::Sign || self.security_mode == MessageSecurityMode::SignAndEncrypt) { + let decrypted_size = self.asymmetric_decrypt_and_verify( + security_policy, + &verification_key, + receiver_thumbprint, + src, + encrypted_range, + their_key, + &mut decrypted_data, + )?; + + Self::update_message_size_and_truncate( + decrypted_data, + decrypted_size, + &self.decoding_limits, + )? 
+ } else if self.security_policy != SecurityPolicy::None + && (self.security_mode == MessageSecurityMode::Sign + || self.security_mode == MessageSecurityMode::SignAndEncrypt) + { // Symmetric decrypt and verify let signature_size = self.security_policy.symmetric_signature_size(); let encrypted_range = encrypted_data_offset..message_size; let signed_range = 0..(message_size - signature_size); - trace!("Decrypting block with signature info {:?} and encrypt info {:?}", signed_range, encrypted_range); + trace!( + "Decrypting block with signature info {:?} and encrypt info {:?}", + signed_range, + encrypted_range + ); let mut decrypted_data = vec![0u8; message_size]; - let decrypted_size = self.symmetric_decrypt_and_verify(src, signed_range, encrypted_range, &mut decrypted_data)?; + let decrypted_size = self.symmetric_decrypt_and_verify( + src, + signed_range, + encrypted_range, + &mut decrypted_data, + )?; // Now we need to strip off signature - Self::update_message_size_and_truncate(decrypted_data, decrypted_size - signature_size, &self.decoding_limits)? + Self::update_message_size_and_truncate( + decrypted_data, + decrypted_size - signature_size, + &self.decoding_limits, + )? } else { src.to_vec() }; @@ -643,7 +773,13 @@ impl SecureChannel { } /// Use the security policy to asymmetric encrypt and sign the specified chunk of data - fn asymmetric_sign_and_encrypt(&self, security_policy: SecurityPolicy, src: &[u8], encrypted_range: Range, dst: &mut [u8]) -> Result { + fn asymmetric_sign_and_encrypt( + &self, + security_policy: SecurityPolicy, + src: &[u8], + encrypted_range: Range, + dst: &mut [u8], + ) -> Result { let header_size = encrypted_range.start; let signing_key = self.private_key.as_ref().unwrap(); @@ -665,14 +801,27 @@ impl SecureChannel { let cipher_text_size = { let padding = security_policy.asymmetric_encryption_padding(); let plain_text_size = encrypted_range.end - encrypted_range.start; - let cipher_text_size = encryption_key.calculate_cipher_text_size(plain_text_size, padding); - trace!("plain_text_size = {}, encrypted_text_size = {}", plain_text_size, cipher_text_size); + let cipher_text_size = + encryption_key.calculate_cipher_text_size(plain_text_size, padding); + trace!( + "plain_text_size = {}, encrypted_text_size = {}", + plain_text_size, + cipher_text_size + ); cipher_text_size }; - Self::update_message_size(&mut tmp[..], header_size + cipher_text_size, &self.decoding_limits)?; + Self::update_message_size( + &mut tmp[..], + header_size + cipher_text_size, + &self.decoding_limits, + )?; // Sign the message header, security header, sequence header, body, padding - security_policy.asymmetric_sign(&signing_key, &tmp[signed_range.clone()], &mut signature)?; + security_policy.asymmetric_sign( + &signing_key, + &tmp[signed_range.clone()], + &mut signature, + )?; tmp[signature_range.clone()].copy_from_slice(&signature); assert_eq!(encrypted_range.end, signature_range.end); @@ -682,11 +831,18 @@ impl SecureChannel { dst[..encrypted_range.start].copy_from_slice(&tmp[..encrypted_range.start]); // Encrypt the sequence header, payload, signature portion into dst - let encrypted_size = security_policy.asymmetric_encrypt(&encryption_key, &tmp[encrypted_range.clone()], &mut dst[encrypted_range.start..])?; + let encrypted_size = security_policy.asymmetric_encrypt( + &encryption_key, + &tmp[encrypted_range.clone()], + &mut dst[encrypted_range.start..], + )?; // Validate encrypted size is right if encrypted_size != cipher_text_size { - panic!("Encrypted block size {} is not the same as 
calculated cipher text size {}", encrypted_size, cipher_text_size); + panic!( + "Encrypted block size {} is not the same as calculated cipher text size {}", + encrypted_size, cipher_text_size + ); } //{ @@ -698,10 +854,19 @@ impl SecureChannel { Ok(header_size + encrypted_size) } - fn check_padding_bytes(padding_bytes: &[u8], expected_padding_byte: u8, padding_range_start: usize) -> Result<(), StatusCode> { + fn check_padding_bytes( + padding_bytes: &[u8], + expected_padding_byte: u8, + padding_range_start: usize, + ) -> Result<(), StatusCode> { for (i, b) in padding_bytes.iter().enumerate() { if *b != expected_padding_byte { - error!("Expected padding byte {}, got {} at index {}", expected_padding_byte, *b, padding_range_start + i); + error!( + "Expected padding byte {}, got {} at index {}", + expected_padding_byte, + *b, + padding_range_start + i + ); return Err(StatusCode::BadSecurityChecksFailed); } } @@ -711,7 +876,12 @@ impl SecureChannel { /// Verify that the padding is correct. Padding is expected to be before the supplied padding end index. /// /// Function returns the padding range so caller can strip the range if it so desires. - fn verify_padding(&self, src: &[u8], key_size: usize, padding_end: usize) -> Result, StatusCode> { + fn verify_padding( + &self, + src: &[u8], + key_size: usize, + padding_end: usize, + ) -> Result, StatusCode> { let padding_range = if key_size > 256 { let padding_byte = src[padding_end - 2]; let extra_padding_byte = src[padding_end - 1]; @@ -721,9 +891,16 @@ impl SecureChannel { trace!("Extra padding - extra_padding_byte = {}, padding_byte = {}, padding_end = {}, padding_size = {}", extra_padding_byte, padding_byte, padding_end, padding_size); // Check padding bytes and extra padding byte - Self::check_padding_bytes(&src[padding_range.start..(padding_range.end - 1)], padding_byte, padding_range.start)?; + Self::check_padding_bytes( + &src[padding_range.start..(padding_range.end - 1)], + padding_byte, + padding_range.start, + )?; if src[padding_range.end - 1] != extra_padding_byte { - error!("Expected extra padding byte {}, at index {}", extra_padding_byte, padding_range.start); + error!( + "Expected extra padding byte {}, at index {}", + extra_padding_byte, padding_range.start + ); return Err(StatusCode::BadSecurityChecksFailed); } padding_range @@ -732,14 +909,27 @@ impl SecureChannel { let padding_size = padding_byte as usize; let padding_range = (padding_end - padding_size - 1)..padding_end; // Check padding bytes - Self::check_padding_bytes(&src[padding_range.clone()], padding_byte, padding_range.start)?; + Self::check_padding_bytes( + &src[padding_range.clone()], + padding_byte, + padding_range.start, + )?; padding_range }; trace!("padding_range = {:?}", padding_range); Ok(padding_range) } - fn asymmetric_decrypt_and_verify(&self, security_policy: SecurityPolicy, verification_key: &PublicKey, receiver_thumbprint: ByteString, src: &[u8], encrypted_range: Range, their_key: Option, dst: &mut [u8]) -> Result { + fn asymmetric_decrypt_and_verify( + &self, + security_policy: SecurityPolicy, + verification_key: &PublicKey, + receiver_thumbprint: ByteString, + src: &[u8], + encrypted_range: Range, + their_key: Option, + dst: &mut [u8], + ) -> Result { // Asymmetric encrypt requires the caller supply the security policy if !security_policy.is_supported() { error!("Security policy {} is not supported by asymmetric_decrypt_and_verify and has been rejected", security_policy); @@ -772,19 +962,33 @@ impl SecureChannel { let mut decrypted_tmp = vec![0u8; 
encrypted_size]; let private_key = self.private_key.as_ref().unwrap(); - let decrypted_size = security_policy.asymmetric_decrypt(private_key, &src[encrypted_range.clone()], &mut decrypted_tmp)?; - trace!("Decrypted bytes = {} compared to encrypted range {}", decrypted_size, encrypted_size); + let decrypted_size = security_policy.asymmetric_decrypt( + private_key, + &src[encrypted_range.clone()], + &mut decrypted_tmp, + )?; + trace!( + "Decrypted bytes = {} compared to encrypted range {}", + decrypted_size, + encrypted_size + ); // Self::log_crypto_data("Decrypted Bytes = ", &decrypted_tmp[..decrypted_size]); let verification_key_signature_size = verification_key.size(); - trace!("Verification key size = {}", verification_key_signature_size); + trace!( + "Verification key size = {}", + verification_key_signature_size + ); // Copy the bytes to dst - dst[encrypted_range.start..(encrypted_range.start + decrypted_size)].copy_from_slice(&decrypted_tmp[0..decrypted_size]); + dst[encrypted_range.start..(encrypted_range.start + decrypted_size)] + .copy_from_slice(&decrypted_tmp[0..decrypted_size]); // The signature range is at the end of the decrypted block for the verification key's signature - let signature_dst_offset = encrypted_range.start + decrypted_size - verification_key_signature_size; - let signature_range_dst = signature_dst_offset..(signature_dst_offset + verification_key_signature_size); + let signature_dst_offset = + encrypted_range.start + decrypted_size - verification_key_signature_size; + let signature_range_dst = + signature_dst_offset..(signature_dst_offset + verification_key_signature_size); // The signed range is from 0 to the end of the plaintext except for key size let signed_range_dst = 0..signature_dst_offset; @@ -792,11 +996,21 @@ impl SecureChannel { // Self::log_crypto_data("Decrypted data = ", &dst[..signature_range_dst.end]); // Verify signature (contained encrypted portion) using verification key - trace!("Verifying signature range {:?} with signature at {:?}", signed_range_dst, signature_range_dst); - security_policy.asymmetric_verify_signature(verification_key, &dst[signed_range_dst.clone()], &dst[signature_range_dst.clone()], their_key)?; + trace!( + "Verifying signature range {:?} with signature at {:?}", + signed_range_dst, + signature_range_dst + ); + security_policy.asymmetric_verify_signature( + verification_key, + &dst[signed_range_dst.clone()], + &dst[signature_range_dst.clone()], + their_key, + )?; // Verify that the padding is correct - let padding_range = self.verify_padding(dst, verification_key.size(), signature_range_dst.start)?; + let padding_range = + self.verify_padding(dst, verification_key.size(), signature_range_dst.start)?; // Decrypted and verified into dst Ok(padding_range.start) @@ -874,7 +1088,13 @@ impl SecureChannel { /// S - Body - E /// S - Padding - E /// Signature - E - pub fn symmetric_sign_and_encrypt(&self, src: &[u8], signed_range: Range, encrypted_range: Range, dst: &mut [u8]) -> Result { + pub fn symmetric_sign_and_encrypt( + &self, + src: &[u8], + signed_range: Range, + encrypted_range: Range, + dst: &mut [u8], + ) -> Result { let encrypted_size = match self.security_mode { MessageSecurityMode::None => { trace!("encrypt_and_sign is doing nothing because security mode == None"); @@ -899,7 +1119,12 @@ impl SecureChannel { // Encrypt the sequence header, payload, signature let (key, iv) = self.encryption_keys(); - let encrypted_size = self.security_policy.symmetric_encrypt(key, iv, &dst_tmp[encrypted_range.clone()], &mut 
dst[encrypted_range.start..(encrypted_range.end + 16)])?; + let encrypted_size = self.security_policy.symmetric_encrypt( + key, + iv, + &dst_tmp[encrypted_range.clone()], + &mut dst[encrypted_range.start..(encrypted_range.end + 16)], + )?; // Copy the message header / security header dst[..encrypted_range.start].copy_from_slice(&dst_tmp[..encrypted_range.start]); @@ -912,15 +1137,29 @@ impl SecureChannel { Ok(encrypted_size) } - fn symmetric_sign(&self, src: &[u8], signed_range: Range, dst: &mut [u8]) -> Result { + fn symmetric_sign( + &self, + src: &[u8], + signed_range: Range, + dst: &mut [u8], + ) -> Result { let signature_size = self.security_policy.symmetric_signature_size(); let mut signature = vec![0u8; signature_size]; let signature_range = signed_range.end..(signed_range.end + signature_size); - trace!("signed_range = {:?}, signature range = {:?}, signature len = {}", signed_range, signature_range, signature_size); + trace!( + "signed_range = {:?}, signature range = {:?}, signature len = {}", + signed_range, + signature_range, + signature_size + ); // Sign the message header, security header, sequence header, body, padding let signing_key = self.signing_key(); - self.security_policy.symmetric_sign(signing_key, &src[signed_range.clone()], &mut signature)?; + self.security_policy.symmetric_sign( + signing_key, + &src[signed_range.clone()], + &mut signature, + )?; trace!("Signature, len {} = {:?}", signature.len(), signature); @@ -941,7 +1180,13 @@ impl SecureChannel { /// S - Body - E /// S - Padding - E /// Signature - E - pub fn symmetric_decrypt_and_verify(&self, src: &[u8], signed_range: Range, encrypted_range: Range, dst: &mut [u8]) -> Result { + pub fn symmetric_decrypt_and_verify( + &self, + src: &[u8], + signed_range: Range, + encrypted_range: Range, + dst: &mut [u8], + ) -> Result { match self.security_mode { MessageSecurityMode::None => { // Just copy everything from src to dst @@ -955,9 +1200,17 @@ impl SecureChannel { trace!("copying from slice {:?}", all); dst[all].copy_from_slice(&src[all]); // Verify signature - trace!("Verifying range from {:?} to signature {}..", signed_range, signed_range.end); + trace!( + "Verifying range from {:?} to signature {}..", + signed_range, + signed_range.end + ); let verification_key = self.verification_key(); - self.security_policy.symmetric_verify_signature(verification_key, &dst[signed_range.clone()], &dst[signed_range.end..])?; + self.security_policy.symmetric_verify_signature( + verification_key, + &dst[signed_range.clone()], + &dst[signed_range.end..], + )?; Ok(encrypted_range.end) } @@ -978,19 +1231,38 @@ impl SecureChannel { let mut decrypted_tmp = vec![0u8; ciphertext_size + 16]; // tmp includes +16 for blocksize let (key, iv) = self.decryption_keys(); - trace!("Secure decrypt called with encrypted range {:?}", encrypted_range); - let decrypted_size = self.security_policy.symmetric_decrypt(key, iv, &src[encrypted_range.clone()], &mut decrypted_tmp[..])?; + trace!( + "Secure decrypt called with encrypted range {:?}", + encrypted_range + ); + let decrypted_size = self.security_policy.symmetric_decrypt( + key, + iv, + &src[encrypted_range.clone()], + &mut decrypted_tmp[..], + )?; // Self::log_crypto_data("Encrypted buffer", &src[..encrypted_range.end]); - let encrypted_range = encrypted_range.start..(encrypted_range.start + decrypted_size); + let encrypted_range = + encrypted_range.start..(encrypted_range.start + decrypted_size); dst[encrypted_range.clone()].copy_from_slice(&decrypted_tmp[..decrypted_size]); 
Self::log_crypto_data("Decrypted buffer", &dst[..encrypted_range.end]); // Verify signature (after encrypted portion) - let signature_range = (encrypted_range.end - self.security_policy.symmetric_signature_size())..encrypted_range.end; - trace!("signed range = {:?}, signature range = {:?}", signed_range, signature_range); + let signature_range = (encrypted_range.end + - self.security_policy.symmetric_signature_size()) + ..encrypted_range.end; + trace!( + "signed range = {:?}, signature range = {:?}", + signed_range, + signature_range + ); let verification_key = self.verification_key(); - self.security_policy.symmetric_verify_signature(verification_key, &dst[signed_range.clone()], &dst[signature_range])?; + self.security_policy.symmetric_verify_signature( + verification_key, + &dst[signed_range.clone()], + &dst[signature_range], + )?; Ok(encrypted_range.end) } MessageSecurityMode::Invalid => { @@ -1003,11 +1275,11 @@ impl SecureChannel { // Panic code which requires a policy fn expect_supported_security_policy(&self) { match self.security_policy { - SecurityPolicy::Basic128Rsa15 | - SecurityPolicy::Basic256 | - SecurityPolicy::Basic256Sha256 | - SecurityPolicy::Aes128Sha256RsaOaep | - SecurityPolicy::Aes256Sha256RsaPss => {} + SecurityPolicy::Basic128Rsa15 + | SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => {} _ => { panic!("Unsupported security policy"); } diff --git a/core/src/comms/security_header.rs b/core/src/comms/security_header.rs index 0ed711eec..86066d849 100644 --- a/core/src/comms/security_header.rs +++ b/core/src/comms/security_header.rs @@ -4,11 +4,11 @@ use std::io::{Read, Write}; -use opcua_types::*; -use opcua_types::status_code::StatusCode; use opcua_types::constants; +use opcua_types::status_code::StatusCode; +use opcua_types::*; -use opcua_crypto::{SecurityPolicy, X509, Thumbprint}; +use opcua_crypto::{SecurityPolicy, Thumbprint, X509}; /// Holds the security header associated with the chunk. Secure channel requests use an asymmetric /// security header, regular messages use a symmetric security header. 
@@ -21,15 +21,15 @@ pub enum SecurityHeader { impl BinaryEncoder for SecurityHeader { fn byte_len(&self) -> usize { match self { - SecurityHeader::Asymmetric(value) => { value.byte_len() } - SecurityHeader::Symmetric(value) => { value.byte_len() } + SecurityHeader::Asymmetric(value) => value.byte_len(), + SecurityHeader::Symmetric(value) => value.byte_len(), } } fn encode(&self, stream: &mut S) -> EncodingResult { match self { - SecurityHeader::Asymmetric(value) => { value.encode(stream) } - SecurityHeader::Symmetric(value) => { value.encode(stream) } + SecurityHeader::Asymmetric(value) => value.encode(stream), + SecurityHeader::Symmetric(value) => value.encode(stream), } } @@ -54,9 +54,7 @@ impl BinaryEncoder for SymmetricSecurityHeader { fn decode(stream: &mut S, decoding_limits: &DecodingLimits) -> EncodingResult { let token_id = u32::decode(stream, decoding_limits)?; - Ok(SymmetricSecurityHeader { - token_id - }) + Ok(SymmetricSecurityHeader { token_id }) } } @@ -91,14 +89,32 @@ impl BinaryEncoder for AsymmetricSecurityHeader { let receiver_certificate_thumbprint = ByteString::decode(stream, decoding_limits)?; // validate sender_certificate_length < MaxCertificateSize - if sender_certificate.value.is_some() && sender_certificate.value.as_ref().unwrap().len() >= constants::MAX_CERTIFICATE_LENGTH as usize { + if sender_certificate.value.is_some() + && sender_certificate.value.as_ref().unwrap().len() + >= constants::MAX_CERTIFICATE_LENGTH as usize + { error!("Sender certificate exceeds max certificate size"); Err(StatusCode::BadDecodingError) } else { // validate receiver_certificate_thumbprint_length == 20 - let thumbprint_len = if receiver_certificate_thumbprint.value.is_some() { receiver_certificate_thumbprint.value.as_ref().unwrap().len() } else { 0 }; + let thumbprint_len = if receiver_certificate_thumbprint.value.is_some() { + receiver_certificate_thumbprint + .value + .as_ref() + .unwrap() + .len() + } else { + 0 + }; if thumbprint_len > 0 && thumbprint_len != Thumbprint::THUMBPRINT_SIZE { - error!("Receiver certificate thumbprint is not 20 bytes long, {} bytes", receiver_certificate_thumbprint.value.as_ref().unwrap().len()); + error!( + "Receiver certificate thumbprint is not 20 bytes long, {} bytes", + receiver_certificate_thumbprint + .value + .as_ref() + .unwrap() + .len() + ); Err(StatusCode::BadDecodingError) } else { Ok(AsymmetricSecurityHeader { @@ -120,7 +136,11 @@ impl AsymmetricSecurityHeader { } } - pub fn new(security_policy: SecurityPolicy, sender_certificate: &X509, receiver_certificate_thumbprint: ByteString) -> AsymmetricSecurityHeader { + pub fn new( + security_policy: SecurityPolicy, + sender_certificate: &X509, + receiver_certificate_thumbprint: ByteString, + ) -> AsymmetricSecurityHeader { AsymmetricSecurityHeader { security_policy_uri: UAString::from(security_policy.to_uri()), sender_certificate: sender_certificate.as_byte_string(), diff --git a/core/src/comms/tcp_codec.rs b/core/src/comms/tcp_codec.rs index 911877065..03305194b 100644 --- a/core/src/comms/tcp_codec.rs +++ b/core/src/comms/tcp_codec.rs @@ -22,11 +22,12 @@ use opcua_types::{ status_code::StatusCode, }; -use crate::{ - comms::{ - message_chunk::MessageChunk, - tcp_types::{AcknowledgeMessage, ErrorMessage, HelloMessage, MESSAGE_HEADER_LEN, MessageHeader, MessageType}, - } +use crate::comms::{ + message_chunk::MessageChunk, + tcp_types::{ + AcknowledgeMessage, ErrorMessage, HelloMessage, MessageHeader, MessageType, + MESSAGE_HEADER_LEN, + }, }; #[derive(Debug)] @@ -55,7 +56,6 @@ impl Decoder for 
TcpCodec { debug!("TcpCodec decode abort flag has been set and is terminating"); Err(io::Error::from(StatusCode::BadOperationAbandoned)) } else if buf.len() > MESSAGE_HEADER_LEN { - // Every OPC UA message has at least 8 bytes of header to be read to see what follows // Get the message header @@ -112,14 +112,15 @@ impl TcpCodec { } // Writes the encodable thing into the buffer. - fn write(&self, msg: T, buf: &mut BytesMut) -> Result<(), io::Error> where T: BinaryEncoder + std::fmt::Debug { + fn write(&self, msg: T, buf: &mut BytesMut) -> Result<(), io::Error> + where + T: BinaryEncoder + std::fmt::Debug, + { buf.reserve(msg.byte_len()); - msg.encode(&mut buf.writer()) - .map(|_| ()) - .map_err(|err| { - error!("Error writing message {:?}, err = {}", msg, err); - io::Error::new(io::ErrorKind::Other, format!("Error = {}", err)) - }) + msg.encode(&mut buf.writer()).map(|_| ()).map_err(|err| { + error!("Error writing message {:?}, err = {}", msg, err); + io::Error::new(io::ErrorKind::Other, format!("Error = {}", err)) + }) } fn is_abort(&self) -> bool { @@ -128,21 +129,29 @@ impl TcpCodec { } /// Reads a message out of the buffer, which is assumed by now to be the proper length - fn decode_message(message_header: MessageHeader, buf: &mut BytesMut, decoding_limits: &DecodingLimits) -> Result { + fn decode_message( + message_header: MessageHeader, + buf: &mut BytesMut, + decoding_limits: &DecodingLimits, + ) -> Result { let mut buf = io::Cursor::new(&buf[..]); match message_header.message_type { - MessageType::Acknowledge => { - Ok(Message::Acknowledge(AcknowledgeMessage::decode(&mut buf, decoding_limits)?)) - } - MessageType::Hello => { - Ok(Message::Hello(HelloMessage::decode(&mut buf, decoding_limits)?)) - } - MessageType::Error => { - Ok(Message::Error(ErrorMessage::decode(&mut buf, decoding_limits)?)) - } - MessageType::Chunk => { - Ok(Message::Chunk(MessageChunk::decode(&mut buf, decoding_limits)?)) - } + MessageType::Acknowledge => Ok(Message::Acknowledge(AcknowledgeMessage::decode( + &mut buf, + decoding_limits, + )?)), + MessageType::Hello => Ok(Message::Hello(HelloMessage::decode( + &mut buf, + decoding_limits, + )?)), + MessageType::Error => Ok(Message::Error(ErrorMessage::decode( + &mut buf, + decoding_limits, + )?)), + MessageType::Chunk => Ok(Message::Chunk(MessageChunk::decode( + &mut buf, + decoding_limits, + )?)), MessageType::Invalid => { error!("Message type for chunk is invalid."); Err(StatusCode::BadCommunicationError) diff --git a/core/src/comms/tcp_types.rs b/core/src/comms/tcp_types.rs index 1264835f6..a9cdfb649 100644 --- a/core/src/comms/tcp_types.rs +++ b/core/src/comms/tcp_types.rs @@ -7,10 +7,7 @@ use std::io::{Cursor, Error, ErrorKind, Read, Result, Write}; use opcua_types::{ - encoding::*, - service_types::EndpointDescription, - status_code::StatusCode, - string::UAString, + encoding::*, service_types::EndpointDescription, status_code::StatusCode, string::UAString, }; use crate::comms::url::url_matches_except_host; @@ -96,12 +93,18 @@ impl MessageHeader { /// Reads the bytes of the stream to a buffer. 
If first 4 bytes are invalid, /// code returns an error - pub fn read_bytes(stream: &mut S, decoding_limits: &DecodingLimits) -> Result> { + pub fn read_bytes( + stream: &mut S, + decoding_limits: &DecodingLimits, + ) -> Result> { // Read the bytes of the stream into a vector let mut header = [0u8; 4]; stream.read_exact(&mut header)?; if MessageHeader::message_type(&header) == MessageType::Invalid { - return Err(Error::new(ErrorKind::Other, "Message type is not recognized, cannot read bytes")); + return Err(Error::new( + ErrorKind::Other, + "Message type is not recognized, cannot read bytes", + )); } let message_size = u32::decode(stream, decoding_limits); if message_size.is_err() { @@ -113,12 +116,18 @@ impl MessageHeader { let mut out = Cursor::new(Vec::with_capacity(message_size as usize)); let result = out.write(&header); if result.is_err() { - return Err(Error::new(ErrorKind::Other, "Cannot write message header to buffer ")); + return Err(Error::new( + ErrorKind::Other, + "Cannot write message header to buffer ", + )); } let result = message_size.encode(&mut out); if result.is_err() { - return Err(Error::new(ErrorKind::Other, "Cannot write message size to buffer ")); + return Err(Error::new( + ErrorKind::Other, + "Cannot write message size to buffer ", + )); } let pos = out.position() as usize; @@ -138,7 +147,9 @@ impl MessageHeader { HELLO_MESSAGE => MessageType::Hello, ACKNOWLEDGE_MESSAGE => MessageType::Acknowledge, ERROR_MESSAGE => MessageType::Error, - CHUNK_MESSAGE | OPEN_SECURE_CHANNEL_MESSAGE | CLOSE_SECURE_CHANNEL_MESSAGE => MessageType::Chunk, + CHUNK_MESSAGE | OPEN_SECURE_CHANNEL_MESSAGE | CLOSE_SECURE_CHANNEL_MESSAGE => { + MessageType::Chunk + } _ => { error!("message type doesn't match anything"); MessageType::Invalid @@ -148,7 +159,7 @@ impl MessageHeader { // Check the 4th byte which should be F for messages or F, C or A for chunks. 
If its // not one of those, the message is invalid match t[3] { - CHUNK_FINAL => { message_type } + CHUNK_FINAL => message_type, CHUNK_INTERMEDIATE | CHUNK_FINAL_ERROR => { if message_type == MessageType::Chunk { message_type @@ -156,9 +167,7 @@ impl MessageHeader { MessageType::Invalid } } - _ => { - MessageType::Invalid - } + _ => MessageType::Invalid, } } } @@ -218,7 +227,12 @@ impl HelloMessage { const MAX_URL_LEN: usize = 4096; /// Creates a HEL message - pub fn new(endpoint_url: &str, send_buffer_size: usize, receive_buffer_size: usize, max_message_size: usize) -> HelloMessage { + pub fn new( + endpoint_url: &str, + send_buffer_size: usize, + receive_buffer_size: usize, + max_message_size: usize, + ) -> HelloMessage { let mut msg = HelloMessage { message_header: MessageHeader::new(MessageType::Hello), protocol_version: 0, @@ -262,7 +276,8 @@ impl HelloMessage { pub fn is_valid_buffer_sizes(&self) -> bool { // Set in part 6 as minimum transport buffer size - self.receive_buffer_size >= MIN_CHUNK_SIZE as u32 && self.send_buffer_size >= MIN_CHUNK_SIZE as u32 + self.receive_buffer_size >= MIN_CHUNK_SIZE as u32 + && self.send_buffer_size >= MIN_CHUNK_SIZE as u32 } } diff --git a/core/src/comms/url.rs b/core/src/comms/url.rs index dbfccd03a..19d54bcf7 100644 --- a/core/src/comms/url.rs +++ b/core/src/comms/url.rs @@ -8,10 +8,7 @@ use std; use ::url::Url; -use opcua_types::{ - constants::DEFAULT_OPC_UA_SERVER_PORT, - status_code::StatusCode, -}; +use opcua_types::{constants::DEFAULT_OPC_UA_SERVER_PORT, status_code::StatusCode}; pub const OPC_TCP_SCHEME: &str = "opc.tcp"; @@ -73,18 +70,17 @@ pub fn url_matches_except_host(url1: &str, url2: &str) -> bool { /// Takes an endpoint url and strips off the path and args to leave just the protocol, host & port. pub fn server_url_from_endpoint_url(endpoint_url: &str) -> std::result::Result { - opc_url_from_str(endpoint_url) - .map(|mut url| { - url.set_path(""); - url.set_query(None); - if let Some(port) = url.port() { - // If the port is the default, strip it so the url string omits it. - if port == DEFAULT_OPC_UA_SERVER_PORT { - let _ = url.set_port(None); - } + opc_url_from_str(endpoint_url).map(|mut url| { + url.set_path(""); + url.set_query(None); + if let Some(port) = url.port() { + // If the port is the default, strip it so the url string omits it. 
+ if port == DEFAULT_OPC_UA_SERVER_PORT { + let _ = url.set_port(None); } - url.into_string() - }) + } + url.into_string() + }) } pub fn is_valid_opc_ua_url(url: &str) -> bool { @@ -114,8 +110,7 @@ pub fn hostname_from_url(url: &str) -> Result { pub fn hostname_port_from_url(url: &str, default_port: u16) -> Result<(String, u16), StatusCode> { // Validate and split out the endpoint we have - let url = Url::parse(url) - .map_err(|_| StatusCode::BadTcpEndpointUrlInvalid)?; + let url = Url::parse(url).map_err(|_| StatusCode::BadTcpEndpointUrlInvalid)?; if url.scheme() != OPC_TCP_SCHEME || !url.has_host() { Err(StatusCode::BadTcpEndpointUrlInvalid) @@ -126,7 +121,6 @@ pub fn hostname_port_from_url(url: &str, default_port: u16) -> Result<(String, u } } - #[cfg(test)] mod tests { use super::*; @@ -134,7 +128,9 @@ mod tests { #[test] fn url_scheme() { assert!(is_opc_ua_binary_url("opc.tcp://foo/xyz")); - assert!(is_opc_ua_binary_url("opc.tcp://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/xyz")); + assert!(is_opc_ua_binary_url( + "opc.tcp://[FEDC:BA98:7654:3210:FEDC:BA98:7654:3210]:80/xyz" + )); assert!(!is_opc_ua_binary_url("http://foo/xyz")); } @@ -143,23 +139,53 @@ mod tests { assert!(url_matches("opc.tcp://foo/", "opc.tcp://foo:4840/")); assert!(!url_matches("opc.tcp://foo/", "opc.tcp://foo:4841/")); assert!(!url_matches("opc.tcp://foo/xyz", "opc.tcp://bar/xyz")); - assert!(url_matches_except_host("opc.tcp://localhost/xyz", "opc.tcp://127.0.0.1/xyz")); - assert!(!url_matches_except_host("opc.tcp://localhost/xyz", "opc.tcp://127.0.0.1/abc")); + assert!(url_matches_except_host( + "opc.tcp://localhost/xyz", + "opc.tcp://127.0.0.1/xyz" + )); + assert!(!url_matches_except_host( + "opc.tcp://localhost/xyz", + "opc.tcp://127.0.0.1/abc" + )); } #[test] fn server_url_from_endpoint_url_test() { - assert_eq!("opc.tcp://localhost/", server_url_from_endpoint_url("opc.tcp://localhost").unwrap()); - assert_eq!("opc.tcp://localhost/", server_url_from_endpoint_url("opc.tcp://localhost:4840").unwrap()); - assert_eq!("opc.tcp://localhost:4841/", server_url_from_endpoint_url("opc.tcp://localhost:4841").unwrap()); - assert_eq!("opc.tcp://localhost/", server_url_from_endpoint_url("opc.tcp://localhost/xyz/abc?1").unwrap()); - assert_eq!("opc.tcp://localhost:999/", server_url_from_endpoint_url("opc.tcp://localhost:999/xyz/abc?1").unwrap()); + assert_eq!( + "opc.tcp://localhost/", + server_url_from_endpoint_url("opc.tcp://localhost").unwrap() + ); + assert_eq!( + "opc.tcp://localhost/", + server_url_from_endpoint_url("opc.tcp://localhost:4840").unwrap() + ); + assert_eq!( + "opc.tcp://localhost:4841/", + server_url_from_endpoint_url("opc.tcp://localhost:4841").unwrap() + ); + assert_eq!( + "opc.tcp://localhost/", + server_url_from_endpoint_url("opc.tcp://localhost/xyz/abc?1").unwrap() + ); + assert_eq!( + "opc.tcp://localhost:999/", + server_url_from_endpoint_url("opc.tcp://localhost:999/xyz/abc?1").unwrap() + ); } #[test] fn url_with_replaced_hostname_test() { - assert_eq!(url_with_replaced_hostname("opc.tcp://foo:123/x", "foo").unwrap(), "opc.tcp://foo:123/x"); - assert_eq!(url_with_replaced_hostname("opc.tcp://foo:123/x", "bar").unwrap(), "opc.tcp://bar:123/x"); - assert_eq!(url_with_replaced_hostname("opc.tcp://localhost:123/x", "127.0.0.1").unwrap(), "opc.tcp://127.0.0.1:123/x"); + assert_eq!( + url_with_replaced_hostname("opc.tcp://foo:123/x", "foo").unwrap(), + "opc.tcp://foo:123/x" + ); + assert_eq!( + url_with_replaced_hostname("opc.tcp://foo:123/x", "bar").unwrap(), + "opc.tcp://bar:123/x" + ); + assert_eq!( 
+ url_with_replaced_hostname("opc.tcp://localhost:123/x", "127.0.0.1").unwrap(), + "opc.tcp://127.0.0.1:123/x" + ); } -} \ No newline at end of file +} diff --git a/core/src/comms/wrapped_tcp_stream.rs b/core/src/comms/wrapped_tcp_stream.rs index 76db0ee86..1384e1341 100644 --- a/core/src/comms/wrapped_tcp_stream.rs +++ b/core/src/comms/wrapped_tcp_stream.rs @@ -57,4 +57,4 @@ impl AsyncWrite for WrappedTcpStream { fn write_buf(&mut self, buf: &mut B) -> Poll { self.0.write_buf(buf) } -} \ No newline at end of file +} diff --git a/core/src/completion_pact.rs b/core/src/completion_pact.rs index d642f00d3..f746d2904 100644 --- a/core/src/completion_pact.rs +++ b/core/src/completion_pact.rs @@ -9,19 +9,21 @@ //! The problem is that tokio's stream listener `for_each` will run forever and there is no //! way to break out of it. The solution is to wrap their future inside another which checks for //! a complete signal. And that's what this does. -use futures::{Async, Stream, Poll}; +use futures::{Async, Poll, Stream}; pub struct CompletionPact - where S: Stream, - C: Stream, +where + S: Stream, + C: Stream, { stream: S, completer: C, } pub fn stream_completion_pact(s: S, c: C) -> CompletionPact - where S: Stream, - C: Stream, +where + S: Stream, + C: Stream, { CompletionPact { stream: s, @@ -30,24 +32,21 @@ pub fn stream_completion_pact(s: S, c: C) -> CompletionPact } impl Stream for CompletionPact - where S: Stream, - C: Stream, +where + S: Stream, + C: Stream, { type Item = S::Item; type Error = S::Error; fn poll(&mut self) -> Poll, S::Error> { match self.completer.poll() { - Ok(Async::Ready(None)) | - Err(_) | - Ok(Async::Ready(Some(_))) => { + Ok(Async::Ready(None)) | Err(_) | Ok(Async::Ready(Some(_))) => { // We are done, forget us debug!("Completer has triggered, indicating completion of the job"); Ok(Async::Ready(None)) } - Ok(Async::NotReady) => { - self.stream.poll() - } + Ok(Async::NotReady) => self.stream.poll(), } } -} \ No newline at end of file +} diff --git a/core/src/config.rs b/core/src/config.rs index 72a16f3c9..713b79b74 100644 --- a/core/src/config.rs +++ b/core/src/config.rs @@ -10,8 +10,8 @@ use std::result::Result; use serde; use serde_yaml; -use opcua_types::{LocalizedText, UAString}; use opcua_types::service_types::{ApplicationDescription, ApplicationType}; +use opcua_types::{LocalizedText, UAString}; /// A trait that handles the loading / saving and validity of configuration information for a /// client and/or server. 
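Before the next hunk, a usage sketch for the Config trait described above: `load` deserializes a YAML file into any implementor. `MyAppConfig` below is a hypothetical placeholder for a type implementing `Config` and `serde::Deserialize`, not a type from this crate (illustrative only, not part of the change set):

// Illustrative only; error handling kept minimal.
fn load_app_config() -> MyAppConfig {
    let path = std::path::PathBuf::from("my_app.conf");
    // Config::load reads the file and deserializes it with serde_yaml (see the hunk below).
    MyAppConfig::load(&path).expect("configuration file is missing or malformed")
}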
@@ -35,7 +35,10 @@ pub trait Config: serde::Serialize { Err(()) } - fn load(path: &Path) -> Result where for<'de> A: Config + serde::Deserialize<'de> { + fn load(path: &Path) -> Result + where + for<'de> A: Config + serde::Deserialize<'de>, + { if let Ok(mut f) = File::open(path) { let mut s = String::new(); if f.read_to_string(&mut s).is_ok() { @@ -43,11 +46,17 @@ pub trait Config: serde::Serialize { if let Ok(config) = config { Ok(config) } else { - error!("Cannot deserialize configuration from {}", path.to_string_lossy()); + error!( + "Cannot deserialize configuration from {}", + path.to_string_lossy() + ); Err(()) } } else { - error!("Cannot read configuration file {} to string", path.to_string_lossy()); + error!( + "Cannot read configuration file {} to string", + path.to_string_lossy() + ); Err(()) } } else { @@ -66,7 +75,9 @@ pub trait Config: serde::Serialize { fn application_type(&self) -> ApplicationType; - fn discovery_urls(&self) -> Option> { None } + fn discovery_urls(&self) -> Option> { + None + } fn application_description(&self) -> ApplicationDescription { ApplicationDescription { diff --git a/core/src/handle.rs b/core/src/handle.rs index ae5e2c8f9..5ef2601dc 100644 --- a/core/src/handle.rs +++ b/core/src/handle.rs @@ -14,10 +14,7 @@ pub struct Handle { impl Handle { /// Creates a new handle factory, that starts with the supplied number pub fn new(first: u32) -> Handle { - Handle { - next: first, - first, - } + Handle { next: first, first } } /// Returns the next handle to be issued, internally incrementing each time so the handle diff --git a/core/src/lib.rs b/core/src/lib.rs index 4c7a4277b..d4301d422 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -24,7 +24,7 @@ macro_rules! supported_message_as { } else { panic!(); } - } + }; } /// Tracing macro for obtaining a lock on a `Mutex`. @@ -76,12 +76,10 @@ lazy_static! { /// Returns a vector of all currently existing runtime components as a vector of strings. #[macro_export] macro_rules! runtime_components { - () => { - { - use opcua_core::RUNTIME; - RUNTIME.components() - } - } + () => {{ + use opcua_core::RUNTIME; + RUNTIME.components() + }}; } /// This macro is for debugging purposes - code register a running component (e.g. tokio task) when it starts @@ -91,7 +89,7 @@ macro_rules! runtime_components { macro_rules! register_runtime_component { ( $component_name:expr ) => { RUNTIME.register_component($component_name); - } + }; } /// See `register_runtime_component` @@ -99,7 +97,7 @@ macro_rules! register_runtime_component { macro_rules! deregister_runtime_component { ( $component_name:expr ) => { RUNTIME.deregister_component($component_name); - } + }; } /// Contains debugging utility helper functions @@ -129,7 +127,11 @@ pub mod debug { char_line.clear(); } hex_line = format!("{} {:02x}", hex_line, value); - char_line.push(if value >= 32 && value <= 126 { value as char } else { '.' }); + char_line.push(if value >= 32 && value <= 126 { + value as char + } else { + '.' + }); } if last_line_padding > 0 { for _ in 0..last_line_padding { @@ -144,17 +146,17 @@ pub mod debug { mod tests; pub mod comms; +pub mod completion_pact; pub mod config; pub mod handle; pub mod runtime; -pub mod completion_pact; pub mod supported_message; /// Contains most of the things that are typically required from a client / server. 
pub mod prelude { - pub use opcua_types::*; - pub use opcua_types::status_code::StatusCode; pub use crate::comms::prelude::*; pub use crate::config::Config; pub use crate::supported_message::*; + pub use opcua_types::status_code::StatusCode; + pub use opcua_types::*; } diff --git a/core/src/runtime.rs b/core/src/runtime.rs index e175966bf..ec708f1c0 100644 --- a/core/src/runtime.rs +++ b/core/src/runtime.rs @@ -31,7 +31,10 @@ impl Runtime { running_components.iter().cloned().collect() } - pub fn register_component(&self, name: T) where T: Into { + pub fn register_component(&self, name: T) + where + T: Into, + { let key = name.into(); debug!("deregistering component {}", key); let mut running_components = trace_lock_unwrap!(self.running_components); @@ -41,12 +44,18 @@ impl Runtime { running_components.insert(key); } - pub fn deregister_component(&self, name: T) where T: Into { + pub fn deregister_component(&self, name: T) + where + T: Into, + { let key = name.into(); debug!("deregistering component {}", key); let mut running_components = trace_lock_unwrap!(self.running_components); if !running_components.contains(&key) { - trace!("Shouldn't be deregistering component {} which doesn't exist", key); + trace!( + "Shouldn't be deregistering component {} which doesn't exist", + key + ); } running_components.remove(&key); } diff --git a/core/src/tests/chunk.rs b/core/src/tests/chunk.rs index d42de04c6..ca5176175 100644 --- a/core/src/tests/chunk.rs +++ b/core/src/tests/chunk.rs @@ -2,16 +2,11 @@ extern crate rustc_serialize as serialize; use std::io::{Cursor, Write}; -use opcua_crypto::{SecurityPolicy, x509::X509}; +use opcua_crypto::{x509::X509, SecurityPolicy}; use opcua_types::DecodingLimits; use crate::{ - comms::{ - chunker::*, - message_chunk::*, - secure_channel::*, - tcp_types::MIN_CHUNK_SIZE, - }, + comms::{chunker::*, message_chunk::*, secure_channel::*, tcp_types::MIN_CHUNK_SIZE}, supported_message::SupportedMessage, tests::*, }; @@ -20,10 +15,11 @@ fn sample_secure_channel_request_data_security_none() -> MessageChunk { let sample_data = vec![ 47, 0, 0, 0, 104, 116, 116, 112, 58, 47, 47, 111, 112, 99, 102, 111, 117, 110, 100, 97, 116, 105, 111, 110, 46, 111, 114, 103, 47, 85, 65, 47, 83, 101, 99, 117, 114, 105, 116, - 121, 80, 111, 108, 105, 99, 121, 35, 78, 111, 110, 101, 255, 255, 255, 255, 255, 255, - 255, 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 190, 1, 0, 0, 208, 130, 196, 162, 147, 106, 210, - 1, 1, 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 1, 0, 0, 0, 255, 255, 255, 255, 192, 39, 9, 0]; + 121, 80, 111, 108, 105, 99, 121, 35, 78, 111, 110, 101, 255, 255, 255, 255, 255, 255, 255, + 255, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 190, 1, 0, 0, 208, 130, 196, 162, 147, 106, 210, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, + 0, 0, 255, 255, 255, 255, 192, 39, 9, 0, + ]; let data = vec![0u8; 12 + sample_data.len()]; let mut stream = Cursor::new(data); @@ -34,7 +30,8 @@ fn sample_secure_channel_request_data_security_none() -> MessageChunk { is_final: MessageIsFinalType::Final, message_size: 12 + sample_data.len() as u32, secure_channel_id: 1, - }.encode(&mut stream); + } + .encode(&mut stream); let _ = stream.write(&sample_data); // Decode chunk from stream @@ -42,12 +39,19 @@ fn sample_secure_channel_request_data_security_none() -> MessageChunk { let decoding_limits = DecodingLimits::default(); let chunk = MessageChunk::decode(&mut stream, &decoding_limits).unwrap(); - println!("Sample chunk info = 
{:?}", chunk.message_header(&decoding_limits).unwrap()); + println!( + "Sample chunk info = {:?}", + chunk.message_header(&decoding_limits).unwrap() + ); chunk } -fn set_chunk_sequence_number(chunk: &mut MessageChunk, secure_channel: &SecureChannel, sequence_number: u32) -> u32 { +fn set_chunk_sequence_number( + chunk: &mut MessageChunk, + secure_channel: &SecureChannel, + sequence_number: u32, +) -> u32 { // Read the sequence header let mut chunk_info = chunk.chunk_info(&secure_channel).unwrap(); let old_sequence_number = chunk_info.sequence_header.sequence_number; @@ -59,7 +63,11 @@ fn set_chunk_sequence_number(chunk: &mut MessageChunk, secure_channel: &SecureCh old_sequence_number } -fn set_chunk_request_id(chunk: &mut MessageChunk, secure_channel: &SecureChannel, request_id: u32) -> u32 { +fn set_chunk_request_id( + chunk: &mut MessageChunk, + secure_channel: &SecureChannel, + request_id: u32, +) -> u32 { // Read the sequence header let mut chunk_info = chunk.chunk_info(&secure_channel).unwrap(); let old_request_id = chunk_info.sequence_header.request_id; @@ -77,7 +85,8 @@ fn make_large_read_response() -> SupportedMessage { response_header: ResponseHeader::null(), results: Some(results), diagnostic_infos: None, - }.into() + } + .into() } /// Encode a very large message with a maximum chunk size and ensure that it turns into multiple chunks @@ -99,7 +108,15 @@ fn chunk_multi_encode_decode() { // Create a very large message let sequence_number = 1000; let request_id = 100; - let chunks = Chunker::encode(sequence_number, request_id, 0, MIN_CHUNK_SIZE, &secure_channel, &response).unwrap(); + let chunks = Chunker::encode( + sequence_number, + request_id, + 0, + MIN_CHUNK_SIZE, + &secure_channel, + &response, + ) + .unwrap(); assert!(chunks.len() > 1); // Verify chunk byte len maxes out at == 8196 @@ -123,7 +140,15 @@ fn chunk_multi_chunk_intermediate_final() { // Create a very large message let sequence_number = 1000; let request_id = 100; - let chunks = Chunker::encode(sequence_number, request_id, 0, MIN_CHUNK_SIZE, &secure_channel, &response).unwrap(); + let chunks = Chunker::encode( + sequence_number, + request_id, + 0, + MIN_CHUNK_SIZE, + &secure_channel, + &response, + ) + .unwrap(); assert!(chunks.len() > 1); let decoding_limits = DecodingLimits::default(); @@ -152,11 +177,27 @@ fn max_message_size() { let sequence_number = 1000; let request_id = 100; - let chunks = Chunker::encode(sequence_number, request_id, max_message_size, 0, &secure_channel, &response).unwrap(); + let chunks = Chunker::encode( + sequence_number, + request_id, + max_message_size, + 0, + &secure_channel, + &response, + ) + .unwrap(); assert_eq!(chunks.len(), 1); // Expect this to fail - let err = Chunker::encode(sequence_number, request_id, max_message_size - 1, 0, &secure_channel, &response).unwrap_err(); + let err = Chunker::encode( + sequence_number, + request_id, + max_message_size - 1, + 0, + &secure_channel, + &response, + ) + .unwrap_err(); assert_eq!(err, StatusCode::BadResponseTooLarge); } @@ -171,7 +212,15 @@ fn validate_chunks_secure_channel_id() { // Create a very large message let sequence_number = 1000; let request_id = 100; - let chunks = Chunker::encode(sequence_number, request_id, 0, MIN_CHUNK_SIZE, &secure_channel, &response).unwrap(); + let chunks = Chunker::encode( + sequence_number, + request_id, + 0, + MIN_CHUNK_SIZE, + &secure_channel, + &response, + ) + .unwrap(); assert!(chunks.len() > 1); // Expect this to work @@ -180,7 +229,10 @@ fn validate_chunks_secure_channel_id() { // Test secure 
channel id mismatch let old_secure_channel_id = secure_channel.secure_channel_id(); secure_channel.set_secure_channel_id(old_secure_channel_id + 1); - assert_eq!(Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), StatusCode::BadSecureChannelIdInvalid); + assert_eq!( + Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), + StatusCode::BadSecureChannelIdInvalid + ); } /// Encode a large message and then ensure verification throws error for non-consecutive sequence numbers @@ -194,11 +246,22 @@ fn validate_chunks_sequence_number() { // Create a very large message let sequence_number = 1000; let request_id = 100; - let mut chunks = Chunker::encode(sequence_number, request_id, 0, MIN_CHUNK_SIZE, &secure_channel, &response).unwrap(); + let mut chunks = Chunker::encode( + sequence_number, + request_id, + 0, + MIN_CHUNK_SIZE, + &secure_channel, + &response, + ) + .unwrap(); assert!(chunks.len() > 1); // Test sequence number cannot be < starting sequence number - assert_eq!(Chunker::validate_chunks(sequence_number + 5000, &secure_channel, &chunks).unwrap_err(), StatusCode::BadSequenceNumberInvalid); + assert_eq!( + Chunker::validate_chunks(sequence_number + 5000, &secure_channel, &chunks).unwrap_err(), + StatusCode::BadSequenceNumberInvalid + ); // Test sequence number is returned properly let result = Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap(); @@ -206,12 +269,18 @@ fn validate_chunks_sequence_number() { // Hack one of the chunks to alter its seq id let old_sequence_nr = set_chunk_sequence_number(&mut chunks[0], &secure_channel, 1001); - assert_eq!(Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), StatusCode::BadSecurityChecksFailed); + assert_eq!( + Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), + StatusCode::BadSecurityChecksFailed + ); // Hack the nth set_chunk_sequence_number(&mut chunks[0], &secure_channel, old_sequence_nr); let _ = set_chunk_sequence_number(&mut chunks[5], &secure_channel, 1008); - assert_eq!(Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), StatusCode::BadSecurityChecksFailed); + assert_eq!( + Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), + StatusCode::BadSecurityChecksFailed + ); } /// Encode a large message and ensure verification throws error for request id mismatches @@ -225,7 +294,15 @@ fn validate_chunks_request_id() { // Create a very large message let sequence_number = 1000; let request_id = 100; - let mut chunks = Chunker::encode(sequence_number, request_id, 0, MIN_CHUNK_SIZE, &secure_channel, &response).unwrap(); + let mut chunks = Chunker::encode( + sequence_number, + request_id, + 0, + MIN_CHUNK_SIZE, + &secure_channel, + &response, + ) + .unwrap(); assert!(chunks.len() > 1); // Expect this to work @@ -233,7 +310,10 @@ fn validate_chunks_request_id() { // Hack the request id so first chunk request id says 101 while the rest say 100 let _ = set_chunk_request_id(&mut chunks[0], &secure_channel, 101); - assert_eq!(Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), StatusCode::BadSecurityChecksFailed); + assert_eq!( + Chunker::validate_chunks(sequence_number, &secure_channel, &chunks).unwrap_err(), + StatusCode::BadSecurityChecksFailed + ); } /// Test creating a request, encoding it and decoding it. 
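// An illustrative sketch (not part of the patch) of the encode / validate / decode round
// trip that the surrounding tests exercise. It assumes the crate items already imported in
// this file (Chunker, SecureChannel, SupportedMessage, MIN_CHUNK_SIZE); the sequence number
// and request id values are arbitrary and error handling is elided.
fn chunker_roundtrip_sketch(
    secure_channel: &SecureChannel,
    message: &SupportedMessage,
) -> SupportedMessage {
    // Split the message into chunks: no max message size (0), minimum chunk size.
    let chunks = Chunker::encode(1000, 100, 0, MIN_CHUNK_SIZE, secure_channel, message).unwrap();
    // Check the secure channel id, consecutive sequence numbers and a consistent request id.
    let _ = Chunker::validate_chunks(1000, secure_channel, &chunks).unwrap();
    // Reassemble the chunks back into the original message.
    Chunker::decode(&chunks, secure_channel, None).unwrap()
}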
@@ -250,7 +330,9 @@ fn chunk_open_secure_channel() { let request = Chunker::decode(&chunks, &secure_channel, None).unwrap(); let request = match request { SupportedMessage::OpenSecureChannelRequest(request) => request, - _ => { panic!("Not a OpenSecureChannelRequest"); } + _ => { + panic!("Not a OpenSecureChannelRequest"); + } }; { let request_header = &request.request_header; @@ -264,14 +346,24 @@ fn chunk_open_secure_channel() { // Encode the message up again to chunks, decode and compare to original trace!("Encoding back to chunks"); - let chunks = Chunker::encode(1, 1, 0, 0, &secure_channel, &SupportedMessage::OpenSecureChannelRequest(request.clone())).unwrap(); + let chunks = Chunker::encode( + 1, + 1, + 0, + 0, + &secure_channel, + &SupportedMessage::OpenSecureChannelRequest(request.clone()), + ) + .unwrap(); assert_eq!(chunks.len(), 1); trace!("Decoding to compare the new version"); let new_request = Chunker::decode(&chunks, &secure_channel, None).unwrap(); let new_request = match new_request { SupportedMessage::OpenSecureChannelRequest(new_request) => new_request, - _ => { panic!("Not a OpenSecureChannelRequest"); } + _ => { + panic!("Not a OpenSecureChannelRequest"); + } }; assert_eq!(request, new_request); } @@ -288,7 +380,8 @@ fn open_secure_channel_response() { 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0xc1, 0x01, 0xe2, 0x50, 0x38, 0x9b, 0xa9, 0x71, 0xd2, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe2, - 0x50, 0x38, 0x9b, 0xa9, 0x71, 0xd2, 0x01, 0xc0, 0x27, 0x09, 0x00, 0xff, 0xff, 0xff, 0xff]; + 0x50, 0x38, 0x9b, 0xa9, 0x71, 0xd2, 0x01, 0xc0, 0x27, 0x09, 0x00, 0xff, 0xff, 0xff, 0xff, + ]; let _ = Test::setup(); @@ -307,7 +400,9 @@ fn open_secure_channel_response() { //debug!("message = {:#?}", message); let response = match message { SupportedMessage::OpenSecureChannelResponse(response) => response, - _ => { panic!("Not a OpenSecureChannelResponse"); } + _ => { + panic!("Not a OpenSecureChannelResponse"); + } }; assert_eq!(response.response_header.request_handle, 0); assert_eq!(response.response_header.service_result, StatusCode::Good); @@ -335,28 +430,40 @@ fn open_secure_channel() { client_nonce: ByteString::null(), requested_lifetime: 4664, }; - let new_open_secure_channel_request = serialize_test_and_return(open_secure_channel_request.clone()); + let new_open_secure_channel_request = + serialize_test_and_return(open_secure_channel_request.clone()); assert_eq!(open_secure_channel_request, new_open_secure_channel_request); // And the response let open_secure_channel_response = make_open_secure_channel_response(); - let new_open_secure_channel_response = serialize_test_and_return(open_secure_channel_response.clone()); - assert_eq!(open_secure_channel_response, new_open_secure_channel_response); + let new_open_secure_channel_response = + serialize_test_and_return(open_secure_channel_response.clone()); + assert_eq!( + open_secure_channel_response, + new_open_secure_channel_response + ); } #[test] fn security_policy_symmetric_encrypt_decrypt() { // Encrypt and decrypt directly to the security policy, make sure all is well - let (secure_channel1, secure_channel2) = make_secure_channels(MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic128Rsa15); + let (secure_channel1, secure_channel2) = make_secure_channels( + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic128Rsa15, + ); let src = vec![0u8; 100]; let mut dst = vec![0u8; 200]; - let 
encrypted_len = secure_channel1.symmetric_sign_and_encrypt(&src, 0..80, 20..100, &mut dst).unwrap(); + let encrypted_len = secure_channel1 + .symmetric_sign_and_encrypt(&src, 0..80, 20..100, &mut dst) + .unwrap(); assert_eq!(encrypted_len, 100); let mut src2 = vec![0u8; 200]; - let decrypted_len = secure_channel2.symmetric_decrypt_and_verify(&dst, 0..80, 20..100, &mut src2).unwrap(); + let decrypted_len = secure_channel2 + .symmetric_decrypt_and_verify(&dst, 0..80, 20..100, &mut src2) + .unwrap(); assert_eq!(decrypted_len, 100); // Compare the data, not the signature @@ -393,7 +500,9 @@ fn asymmetric_decrypt_and_verify_sample_chunk() { secure_channel.set_remote_cert(Some(their_cert)); secure_channel.set_private_key(Some(our_key)); - let _ = secure_channel.verify_and_remove_security_forensic(&message_data, Some(their_key)).unwrap(); + let _ = secure_channel + .verify_and_remove_security_forensic(&message_data, Some(their_key)) + .unwrap(); } #[test] diff --git a/core/src/tests/comms.rs b/core/src/tests/comms.rs index c65a0aec8..81c5bbc39 100644 --- a/core/src/tests/comms.rs +++ b/core/src/tests/comms.rs @@ -3,23 +3,22 @@ use std::io::*; use opcua_crypto::SecurityPolicy; use opcua_types::*; -use crate::comms::{ - secure_channel::*, - tcp_types::*, -}; +use crate::comms::{secure_channel::*, tcp_types::*}; fn hello_data() -> Vec { vec![ 0x48, 0x45, 0x4c, 0x46, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x00, 0x00, 0x00, 0x6f, 0x70, 0x63, 0x2e, 0x74, 0x63, 0x70, 0x3a, 0x2f, 0x2f, 0x31, 0x32, 0x37, - 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x31, 0x32, 0x33, 0x34, 0x2f] + 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x31, 0x32, 0x33, 0x34, 0x2f, + ] } fn ack_data() -> Vec { vec![ 0x41, 0x43, 0x4b, 0x46, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, - 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x00] + 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff, 0xff, 0x00, 0x00, + ] } #[test] @@ -35,7 +34,10 @@ pub fn hello() { assert_eq!(hello.send_buffer_size, 655360); assert_eq!(hello.max_message_size, 0); assert_eq!(hello.max_chunk_count, 0); - assert_eq!(hello.endpoint_url, UAString::from("opc.tcp://127.0.0.1:1234/")); + assert_eq!( + hello.endpoint_url, + UAString::from("opc.tcp://127.0.0.1:1234/") + ); } #[test] @@ -59,11 +61,25 @@ pub fn secure_channel_nonce() { sc.set_security_mode(MessageSecurityMode::SignAndEncrypt); sc.set_security_policy(SecurityPolicy::Basic256); // Nonce which is not 32 bytes long is an error - assert!(sc.set_remote_nonce_from_byte_string(&ByteString::null()).is_err()); - assert!(sc.set_remote_nonce_from_byte_string(&ByteString::from(b"")).is_err()); - assert!(sc.set_remote_nonce_from_byte_string(&ByteString::from(b"1")).is_err()); - assert!(sc.set_remote_nonce_from_byte_string(&ByteString::from(b"0123456789012345678901234567890")).is_err()); - assert!(sc.set_remote_nonce_from_byte_string(&ByteString::from(b"012345678901234567890123456789012".as_ref())).is_err()); + assert!(sc + .set_remote_nonce_from_byte_string(&ByteString::null()) + .is_err()); + assert!(sc + .set_remote_nonce_from_byte_string(&ByteString::from(b"")) + .is_err()); + assert!(sc + .set_remote_nonce_from_byte_string(&ByteString::from(b"1")) + .is_err()); + assert!(sc + .set_remote_nonce_from_byte_string(&ByteString::from(b"0123456789012345678901234567890")) + .is_err()); + assert!(sc + .set_remote_nonce_from_byte_string(&ByteString::from( + 
b"012345678901234567890123456789012".as_ref() + )) + .is_err()); // Nonce which is 32 bytes long is good - assert!(sc.set_remote_nonce_from_byte_string(&ByteString::from(b"01234567890123456789012345678901")).is_ok()); + assert!(sc + .set_remote_nonce_from_byte_string(&ByteString::from(b"01234567890123456789012345678901")) + .is_ok()); } diff --git a/core/src/tests/hello.rs b/core/src/tests/hello.rs index 9d7975418..ea9cad7dc 100644 --- a/core/src/tests/hello.rs +++ b/core/src/tests/hello.rs @@ -1,23 +1,20 @@ use opcua_types::{ byte_string::ByteString, - service_types::{ - ApplicationDescription, EndpointDescription, MessageSecurityMode, - }, + service_types::{ApplicationDescription, EndpointDescription, MessageSecurityMode}, string::UAString, }; -use crate::comms::{ - tcp_types::{ - MessageHeader, HelloMessage, MessageType, - }, -}; +use crate::comms::tcp_types::{HelloMessage, MessageHeader, MessageType}; #[test] fn endpoint_url() { // Ensure hello with None endpoint is invalid // Ensure hello with URL > 4096 chars is invalid let mut h = HelloMessage { - message_header: MessageHeader { message_type: MessageType::Invalid, message_size: 0 }, + message_header: MessageHeader { + message_type: MessageType::Invalid, + message_size: 0, + }, protocol_version: 0, receive_buffer_size: 0, send_buffer_size: 0, @@ -26,18 +23,16 @@ fn endpoint_url() { endpoint_url: UAString::null(), }; - let endpoints = vec![ - EndpointDescription { - endpoint_url: UAString::from("opc.tcp://foo"), - security_policy_uri: UAString::null(), - security_mode: MessageSecurityMode::None, - server: ApplicationDescription::default(), - security_level: 0, - server_certificate: ByteString::null(), - transport_profile_uri: UAString::null(), - user_identity_tokens: None, - } - ]; + let endpoints = vec![EndpointDescription { + endpoint_url: UAString::from("opc.tcp://foo"), + security_policy_uri: UAString::null(), + security_mode: MessageSecurityMode::None, + server: ApplicationDescription::default(), + security_level: 0, + server_certificate: ByteString::null(), + transport_profile_uri: UAString::null(), + user_identity_tokens: None, + }]; // Negative tests assert!(!h.matches_endpoint(&endpoints)); @@ -62,7 +57,10 @@ fn endpoint_url() { fn valid_buffer_sizes() { // Test that invalid buffer sizes are rejected, while valid buffer sizes are accepted let mut h = HelloMessage { - message_header: MessageHeader { message_type: MessageType::Invalid, message_size: 0 }, + message_header: MessageHeader { + message_type: MessageType::Invalid, + message_size: 0, + }, protocol_version: 0, receive_buffer_size: 0, send_buffer_size: 0, @@ -79,4 +77,4 @@ fn valid_buffer_sizes() { assert!(!h.is_valid_buffer_sizes()); h.send_buffer_size = 8196; assert!(h.is_valid_buffer_sizes()); -} \ No newline at end of file +} diff --git a/core/src/tests/mod.rs b/core/src/tests/mod.rs index f7f85f6d4..d4ccecc38 100644 --- a/core/src/tests/mod.rs +++ b/core/src/tests/mod.rs @@ -3,20 +3,17 @@ use std::fmt::Debug; use std::io::Cursor; use opcua_crypto::{ - pkey::PrivateKey, security_policy::SecurityPolicy, x509::{X509, X509Data}, -}; -use opcua_types::{ - *, - status_code::StatusCode, + pkey::PrivateKey, + security_policy::SecurityPolicy, + x509::{X509Data, X509}, }; +use opcua_types::{status_code::StatusCode, *}; -use crate::{ - comms::secure_channel::SecureChannel, - supported_message::SupportedMessage, -}; +use crate::{comms::secure_channel::SecureChannel, supported_message::SupportedMessage}; pub fn serialize_test_and_return(value: T) -> T - where T: BinaryEncoder + 
Debug + PartialEq +where + T: BinaryEncoder + Debug + PartialEq, { // Ask the struct for its byte length let byte_len = value.byte_len(); @@ -47,13 +44,19 @@ pub fn serialize_test_and_return(value: T) -> T } pub fn serialize_test(value: T) - where T: BinaryEncoder + Debug + PartialEq +where + T: BinaryEncoder + Debug + PartialEq, { let _ = serialize_test_and_return(value); } /// Makes a secure channel -fn make_secure_channel(security_mode: MessageSecurityMode, security_policy: SecurityPolicy, local_nonce: Vec, remote_nonce: Vec) -> SecureChannel { +fn make_secure_channel( + security_mode: MessageSecurityMode, + security_policy: SecurityPolicy, + local_nonce: Vec, + remote_nonce: Vec, +) -> SecureChannel { let mut secure_channel = SecureChannel::new_no_certificate_store(); secure_channel.set_security_mode(security_mode); secure_channel.set_security_policy(security_policy); @@ -64,12 +67,27 @@ fn make_secure_channel(security_mode: MessageSecurityMode, security_policy: Secu } /// Makes a pair of secure channels representing local and remote side to test crypto -fn make_secure_channels(security_mode: MessageSecurityMode, security_policy: SecurityPolicy) -> (SecureChannel, SecureChannel) { +fn make_secure_channels( + security_mode: MessageSecurityMode, + security_policy: SecurityPolicy, +) -> (SecureChannel, SecureChannel) { let local_nonce = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]; - let remote_nonce = vec![16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]; - - let secure_channel1 = make_secure_channel(security_mode, security_policy, local_nonce.clone(), remote_nonce.clone()); - let secure_channel2 = make_secure_channel(security_mode, security_policy, remote_nonce.clone(), local_nonce.clone()); + let remote_nonce = vec![ + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + ]; + + let secure_channel1 = make_secure_channel( + security_mode, + security_policy, + local_nonce.clone(), + remote_nonce.clone(), + ); + let secure_channel2 = make_secure_channel( + security_mode, + security_policy, + remote_nonce.clone(), + local_nonce.clone(), + ); (secure_channel1, secure_channel2) } @@ -108,7 +126,8 @@ fn make_sample_message() -> SupportedMessage { endpoint_url: UAString::null(), locale_ids: None, profile_uris: None, - }.into() + } + .into() } fn make_test_cert(key_size: u32) -> (X509, PrivateKey) { @@ -121,16 +140,26 @@ fn make_test_cert(key_size: u32) -> (X509, PrivateKey) { organizational_unit: "x.org ops".to_string(), country: "EN".to_string(), state: "London".to_string(), - alt_host_names: vec![APPLICATION_URI.to_string(), "foo".to_string(), "foo2".to_string(), APPLICATION_HOSTNAME.to_string(), "foo3".to_string()], + alt_host_names: vec![ + APPLICATION_URI.to_string(), + "foo".to_string(), + "foo2".to_string(), + APPLICATION_HOSTNAME.to_string(), + "foo3".to_string(), + ], certificate_duration_days: 60, }; let cert = X509::cert_and_pkey(&args); cert.unwrap() } -fn make_test_cert_2048() -> (X509, PrivateKey) { make_test_cert(2048) } +fn make_test_cert_2048() -> (X509, PrivateKey) { + make_test_cert(2048) +} -fn make_test_cert_4096() -> (X509, PrivateKey) { make_test_cert(4096) } +fn make_test_cert_4096() -> (X509, PrivateKey) { + make_test_cert(4096) +} struct Test; @@ -141,8 +170,8 @@ impl Test { } mod chunk; -mod services; mod comms; -mod secure_channel; mod hello; +mod secure_channel; +mod services; mod supported_message; diff --git a/core/src/tests/secure_channel.rs b/core/src/tests/secure_channel.rs index 5f1fdd305..851743560 100644 --- 
a/core/src/tests/secure_channel.rs +++ b/core/src/tests/secure_channel.rs @@ -1,7 +1,6 @@ //! These tests are specifically testing secure channel behaviour of signing, encrypting, decrypting and verifying //! chunks containing messages - use opcua_crypto::SecurityPolicy; use crate::comms::chunker::*; @@ -9,8 +8,13 @@ use crate::comms::secure_channel::*; use crate::tests::*; -fn test_symmetric_encrypt_decrypt(message: SupportedMessage, security_mode: MessageSecurityMode, security_policy: SecurityPolicy) { - let (secure_channel1, mut secure_channel2) = make_secure_channels(security_mode, security_policy); +fn test_symmetric_encrypt_decrypt( + message: SupportedMessage, + security_mode: MessageSecurityMode, + security_policy: SecurityPolicy, +) { + let (secure_channel1, mut secure_channel2) = + make_secure_channels(security_mode, security_policy); let mut chunks = Chunker::encode(1, 1, 0, 0, &secure_channel1, &message).unwrap(); assert_eq!(chunks.len(), 1); @@ -19,12 +23,16 @@ fn test_symmetric_encrypt_decrypt(message: SupportedMessage, security_mode: Mess let chunk = &mut chunks[0]; let mut encrypted_data = vec![0u8; chunk.data.len() + 4096]; - let encrypted_size = secure_channel1.apply_security(&chunk, &mut encrypted_data[..]).unwrap(); + let encrypted_size = secure_channel1 + .apply_security(&chunk, &mut encrypted_data[..]) + .unwrap(); trace!("Result of applying security = {}", encrypted_size); - // Decrypted message should identical to original with same length and + // Decrypted message should identical to original with same length and // no signature or padding - let chunk2 = secure_channel2.verify_and_remove_security(&encrypted_data[..encrypted_size]).unwrap(); + let chunk2 = secure_channel2 + .verify_and_remove_security(&encrypted_data[..encrypted_size]) + .unwrap(); // Why offset 12? So we don't compare message_size part which may differ when padding is added. Less than ideal // TODO padding should be stripped from removed security and the message size should be same @@ -35,15 +43,27 @@ fn test_symmetric_encrypt_decrypt(message: SupportedMessage, security_mode: Mess assert_eq!(message, message2); } -fn test_asymmetric_encrypt_decrypt(message: SupportedMessage, security_mode: MessageSecurityMode, security_policy: SecurityPolicy) { +fn test_asymmetric_encrypt_decrypt( + message: SupportedMessage, + security_mode: MessageSecurityMode, + security_policy: SecurityPolicy, +) { // Do the test twice using a large key to a small key and vice versa so any issues with padding, // extra padding are caught in both directions. for i in 0..2 { // Create a cert and private key pretending to be us and them. Keysizes are different to shake out issues with // signature lengths. Encrypting key will be 4096 bits to test extra padding functionality. 
- let (our_cert, our_key) = if i == 0 { make_test_cert_4096() } else { make_test_cert_2048() }; + let (our_cert, our_key) = if i == 0 { + make_test_cert_4096() + } else { + make_test_cert_2048() + }; // let (our_cert, our_key) = make_test_cert_1024(); - let (their_cert, their_key) = if i == 0 { make_test_cert_2048() } else { make_test_cert_4096() }; + let (their_cert, their_key) = if i == 0 { + make_test_cert_2048() + } else { + make_test_cert_4096() + }; let mut secure_channel = SecureChannel::new_no_certificate_store(); secure_channel.set_security_mode(security_mode); @@ -60,7 +80,9 @@ fn test_asymmetric_encrypt_decrypt(message: SupportedMessage, security_mode: Mes let chunk = &mut chunks[0]; let mut encrypted_data = vec![0u8; chunk.data.len() + 4096]; - let encrypted_size = secure_channel.apply_security(&chunk, &mut encrypted_data[..]).unwrap(); + let encrypted_size = secure_channel + .apply_security(&chunk, &mut encrypted_data[..]) + .unwrap(); trace!("Result of applying security = {}", encrypted_size); // Now we shall try to decrypt what has been encrypted by flipping the keys around @@ -71,7 +93,9 @@ fn test_asymmetric_encrypt_decrypt(message: SupportedMessage, security_mode: Mes secure_channel.set_private_key(Some(their_key)); // Compare up to original length - let chunk2 = secure_channel.verify_and_remove_security(&encrypted_data[..encrypted_size]).unwrap(); + let chunk2 = secure_channel + .verify_and_remove_security(&encrypted_data[..encrypted_size]) + .unwrap(); assert_eq!(chunk.data.len(), chunk2.data.len()); assert_eq!(&chunk.data[12..], &chunk2.data[12..chunk2.data.len()]); } @@ -81,21 +105,33 @@ fn test_asymmetric_encrypt_decrypt(message: SupportedMessage, security_mode: Mes fn asymmetric_sign_and_encrypt_message_chunk_basic128rsa15() { let _ = Test::setup(); error!("asymmetric_sign_and_encrypt_message_chunk_basic128rsa15"); - test_asymmetric_encrypt_decrypt(make_open_secure_channel_response().into(), MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic128Rsa15); + test_asymmetric_encrypt_decrypt( + make_open_secure_channel_response().into(), + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic128Rsa15, + ); } #[test] fn asymmetric_sign_and_encrypt_message_chunk_basic256() { let _ = Test::setup(); error!("asymmetric_sign_and_encrypt_message_chunk_basic256"); - test_asymmetric_encrypt_decrypt(make_open_secure_channel_response().into(), MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic256); + test_asymmetric_encrypt_decrypt( + make_open_secure_channel_response().into(), + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic256, + ); } #[test] fn asymmetric_sign_and_encrypt_message_chunk_basic256sha256() { let _ = Test::setup(); error!("asymmetric_sign_and_encrypt_message_chunk_basic256sha256"); - test_asymmetric_encrypt_decrypt(make_open_secure_channel_response().into(), MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic256Sha256); + test_asymmetric_encrypt_decrypt( + make_open_secure_channel_response().into(), + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic256Sha256, + ); } /// Create a message, encode it to a chunk, sign the chunk, verify the signature and decode back to message @@ -103,21 +139,33 @@ fn asymmetric_sign_and_encrypt_message_chunk_basic256sha256() { fn symmetric_sign_message_chunk_basic128rsa15() { let _ = Test::setup(); error!("symmetric_sign_message_chunk_basic128rsa15"); - test_symmetric_encrypt_decrypt(make_sample_message(), MessageSecurityMode::Sign, SecurityPolicy::Basic128Rsa15); + 
test_symmetric_encrypt_decrypt( + make_sample_message(), + MessageSecurityMode::Sign, + SecurityPolicy::Basic128Rsa15, + ); } #[test] fn symmetric_sign_message_chunk_basic256() { let _ = Test::setup(); error!("symmetric_sign_message_chunk_basic256"); - test_symmetric_encrypt_decrypt(make_sample_message(), MessageSecurityMode::Sign, SecurityPolicy::Basic256); + test_symmetric_encrypt_decrypt( + make_sample_message(), + MessageSecurityMode::Sign, + SecurityPolicy::Basic256, + ); } #[test] fn symmetric_sign_message_chunk_basic256sha256() { let _ = Test::setup(); error!("symmetric_sign_message_chunk_basic256sha256"); - test_symmetric_encrypt_decrypt(make_sample_message(), MessageSecurityMode::Sign, SecurityPolicy::Basic256Sha256); + test_symmetric_encrypt_decrypt( + make_sample_message(), + MessageSecurityMode::Sign, + SecurityPolicy::Basic256Sha256, + ); } /// Create a message, encode it to a chunk, sign the chunk, encrypt, decrypt, verify the signature and decode back to message @@ -125,7 +173,11 @@ fn symmetric_sign_message_chunk_basic256sha256() { fn symmetric_sign_and_encrypt_message_chunk_basic128rsa15() { let _ = Test::setup(); error!("symmetric_sign_and_encrypt_message_chunk_basic128rsa15"); - test_symmetric_encrypt_decrypt(make_sample_message(), MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic128Rsa15); + test_symmetric_encrypt_decrypt( + make_sample_message(), + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic128Rsa15, + ); } /// Create a message, encode it to a chunk, sign the chunk, encrypt, decrypt, verify the signature and decode back to message @@ -133,7 +185,11 @@ fn symmetric_sign_and_encrypt_message_chunk_basic128rsa15() { fn symmetric_sign_and_encrypt_message_chunk_basic256() { let _ = Test::setup(); error!("symmetric_sign_and_encrypt_message_chunk_basic256"); - test_symmetric_encrypt_decrypt(make_sample_message(), MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic256); + test_symmetric_encrypt_decrypt( + make_sample_message(), + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic256, + ); } /// Create a message, encode it to a chunk, sign the chunk, encrypt, decrypt, verify the signature and decode back to message @@ -141,5 +197,9 @@ fn symmetric_sign_and_encrypt_message_chunk_basic256() { fn symmetric_sign_and_encrypt_message_chunk_basic256sha256() { let _ = Test::setup(); error!("symmetric_sign_and_encrypt_message_chunk_basic256sha256"); - test_symmetric_encrypt_decrypt(make_sample_message(), MessageSecurityMode::SignAndEncrypt, SecurityPolicy::Basic256Sha256); + test_symmetric_encrypt_decrypt( + make_sample_message(), + MessageSecurityMode::SignAndEncrypt, + SecurityPolicy::Basic256Sha256, + ); } diff --git a/core/src/tests/services.rs b/core/src/tests/services.rs index bb016829c..80456e0f9 100644 --- a/core/src/tests/services.rs +++ b/core/src/tests/services.rs @@ -23,4 +23,4 @@ fn get_endpoints_request() { profile_uris: Some(vec![UAString::from("xyz")]), }; serialize_test(r); -} \ No newline at end of file +} diff --git a/crypto/src/aeskey.rs b/crypto/src/aeskey.rs index af6e94dcf..39011f60a 100644 --- a/crypto/src/aeskey.rs +++ b/crypto/src/aeskey.rs @@ -17,16 +17,29 @@ pub struct AesKey { impl AesKey { pub fn new(security_policy: SecurityPolicy, value: &[u8]) -> AesKey { - AesKey { value: value.to_vec(), security_policy } + AesKey { + value: value.to_vec(), + security_policy, + } } pub fn value(&self) -> &[u8] { &self.value } - fn validate_aes_args(cipher: &Cipher, src: &[u8], iv: &[u8], dst: &mut [u8]) -> Result<(), StatusCode> 
{ + fn validate_aes_args( + cipher: &Cipher, + src: &[u8], + iv: &[u8], + dst: &mut [u8], + ) -> Result<(), StatusCode> { if dst.len() < src.len() + cipher.block_size() { - error!("Dst buffer is too small {} vs {} + {}", src.len(), dst.len(), cipher.block_size()); + error!( + "Dst buffer is too small {} vs {} + {}", + src.len(), + dst.len(), + cipher.block_size() + ); Err(StatusCode::BadUnexpectedError) } else if iv.len() != 16 && iv.len() != 32 { // ... It would be nice to compare iv size to be exact to the key size here (should be the @@ -46,7 +59,9 @@ impl AesKey { // Aes128_CBC Cipher::aes_128_cbc() } - SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes256Sha256RsaPss => { + SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes256Sha256RsaPss => { // Aes256_CBC Cipher::aes_256_cbc() } @@ -57,7 +72,13 @@ impl AesKey { } /// Encrypt or decrypt data according to the mode - fn do_cipher(&self, mode: Mode, src: &[u8], iv: &[u8], dst: &mut [u8]) -> Result { + fn do_cipher( + &self, + mode: Mode, + src: &[u8], + iv: &[u8], + dst: &mut [u8], + ) -> Result { let cipher = self.cipher(); Self::validate_aes_args(&cipher, src, iv, dst)?; @@ -69,7 +90,8 @@ impl AesKey { crypter.pad(false); let result = crypter.update(src, dst); if let Ok(count) = result { - crypter.finalize(&mut dst[count..]) + crypter + .finalize(&mut dst[count..]) .map(|rest| { trace!("do cipher size {}", count + rest); count + rest @@ -125,4 +147,4 @@ mod tests { }); let _ = child.join(); } -} \ No newline at end of file +} diff --git a/crypto/src/certificate_store.rs b/crypto/src/certificate_store.rs index 3cc9ca461..167cef63d 100644 --- a/crypto/src/certificate_store.rs +++ b/crypto/src/certificate_store.rs @@ -6,18 +6,15 @@ //! for checking certificates supplied by the remote end to see if they are valid and trusted or not. 
use opcua_types::service_types::ApplicationDescription; use opcua_types::status_code::StatusCode; -use openssl::{ - pkey, - x509, -}; -use std::fs::{File, metadata}; +use openssl::{pkey, x509}; +use std::fs::{metadata, File}; use std::io::{Read, Write}; use std::path::{Path, PathBuf}; use crate::{ pkey::PrivateKey, security_policy::SecurityPolicy, - x509::{X509, X509Data}, + x509::{X509Data, X509}, }; /// Default path to the applications own certificate @@ -62,7 +59,12 @@ impl CertificateStore { } /// Sets up the certificate store, creates the path to it, and optionally creates a demo cert - pub fn new_with_keypair(pki_path: &Path, cert_path: Option<&Path>, pkey_path: Option<&Path>, application_description: Option) -> (CertificateStore, Option, Option) { + pub fn new_with_keypair( + pki_path: &Path, + cert_path: Option<&Path>, + pkey_path: Option<&Path>, + application_description: Option, + ) -> (CertificateStore, Option, Option) { let mut certificate_store = CertificateStore::new(pki_path); if let (Some(cert_path), Some(pkey_path)) = (cert_path, pkey_path) { certificate_store.own_certificate_path = cert_path.to_path_buf(); @@ -77,7 +79,10 @@ impl CertificateStore { (Some(cert), Some(pkey)) } else if let Some(application_description) = application_description { info!("Creating sample application instance certificate and private key"); - let result = certificate_store.create_and_store_application_instance_cert(&X509Data::from(application_description), false); + let result = certificate_store.create_and_store_application_instance_cert( + &X509Data::from(application_description), + false, + ); if let Err(err) = result { error!("Certificate creation failed, error = {}", err); (None, None) @@ -86,7 +91,10 @@ impl CertificateStore { (Some(cert), Some(pkey)) } } else { - error!("Application instance certificate and private key could not be read - {}", result.unwrap_err()); + error!( + "Application instance certificate and private key could not be read - {}", + result.unwrap_err() + ); (None, None) } }; @@ -112,14 +120,18 @@ impl CertificateStore { pub fn read_own_cert_and_pkey(&self) -> Result<(X509, PrivateKey), String> { if let Ok(cert) = CertificateStore::read_cert(&self.own_certificate_path()) { CertificateStore::read_pkey(&self.own_private_key_path()) - .map(|pkey| { - (cert, pkey) - }) + .map(|pkey| (cert, pkey)) .map_err(|_| { - format!("Cannot read pkey from path {:?}", self.own_private_key_path()) + format!( + "Cannot read pkey from path {:?}", + self.own_private_key_path() + ) }) } else { - Err(format!("Cannot read cert from path {:?}", self.own_certificate_path())) + Err(format!( + "Cannot read cert from path {:?}", + self.own_certificate_path() + )) } } @@ -135,7 +147,11 @@ impl CertificateStore { /// This function will use the supplied arguments to create an Application Instance Certificate /// consisting of a X509v3 certificate and public/private key pair. The cert (including pubkey) /// and private key will be written to disk under the pki path. 
- pub fn create_and_store_application_instance_cert(&self, args: &X509Data, overwrite: bool) -> Result<(X509, PrivateKey), String> { + pub fn create_and_store_application_instance_cert( + &self, + args: &X509Data, + overwrite: bool, + ) -> Result<(X509, PrivateKey), String> { // Create the cert and corresponding private key let (cert, pkey) = X509::cert_and_pkey(args)?; @@ -144,7 +160,10 @@ impl CertificateStore { // Write the private key let pem = pkey.private_key_to_pem().unwrap(); - info!("Writing private key to {}", &self.own_private_key_path().display()); + info!( + "Writing private key to {}", + &self.own_private_key_path().display() + ); let _ = CertificateStore::write_to_file(&pem, &self.own_private_key_path(), overwrite)?; Ok((cert, pkey)) @@ -158,8 +177,19 @@ impl CertificateStore { /// A non `Good` status code indicates a failure in the cert or in some action required in /// order to validate it. /// - pub fn validate_or_reject_application_instance_cert(&self, cert: &X509, security_policy: SecurityPolicy, hostname: Option<&str>, application_uri: Option<&str>) -> StatusCode { - let result = self.validate_application_instance_cert(cert, security_policy, hostname, application_uri); + pub fn validate_or_reject_application_instance_cert( + &self, + cert: &X509, + security_policy: SecurityPolicy, + hostname: Option<&str>, + application_uri: Option<&str>, + ) -> StatusCode { + let result = self.validate_application_instance_cert( + cert, + security_policy, + hostname, + application_uri, + ); if result.is_bad() { match result { StatusCode::BadUnexpectedError | StatusCode::BadSecurityChecksFailed => { @@ -186,7 +216,11 @@ impl CertificateStore { } else { let cert2 = CertificateStore::read_cert(cert_path); if cert2.is_err() { - trace!("Cannot read cert from disk {:?} - {}", cert_path, cert2.unwrap_err()); + trace!( + "Cannot read cert from disk {:?} - {}", + cert_path, + cert2.unwrap_err() + ); // No cert2 to compare to false } else { @@ -209,7 +243,13 @@ impl CertificateStore { /// A non `Good` status code indicates a failure in the cert or in some action required in /// order to validate it. 
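// An illustrative usage sketch (not part of the patch) for the validation helper documented
// above. The hostname and application uri values are assumptions made up for the example;
// the types and the method signature are the ones shown in this file.
fn validate_remote_cert_sketch(store: &CertificateStore, their_cert: &X509) -> bool {
    let status = store.validate_or_reject_application_instance_cert(
        their_cert,
        SecurityPolicy::Basic256Sha256,
        Some("opc.example.com"),    // hostname the cert should match (illustrative)
        Some("urn:example:client"), // application uri the cert should carry (illustrative)
    );
    // A non-Good status means the cert failed one of the checks implemented below.
    !status.is_bad()
}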
/// - pub fn validate_application_instance_cert(&self, cert: &X509, security_policy: SecurityPolicy, hostname: Option<&str>, application_uri: Option<&str>) -> StatusCode { + pub fn validate_application_instance_cert( + &self, + cert: &X509, + security_policy: SecurityPolicy, + hostname: Option<&str>, + application_uri: Option<&str>, + ) -> StatusCode { let cert_file_name = CertificateStore::cert_file_name(&cert); debug!("Validating cert with name on disk {}", cert_file_name); @@ -218,12 +258,18 @@ impl CertificateStore { { let mut cert_path = self.rejected_certs_dir(); if !cert_path.exists() { - error!("Path for rejected certificates {} does not exist", cert_path.display()); + error!( + "Path for rejected certificates {} does not exist", + cert_path.display() + ); return StatusCode::BadUnexpectedError; } cert_path.push(&cert_file_name); if cert_path.exists() { - warn!("Certificate {} is untrusted because it resides in the rejected directory", cert_file_name); + warn!( + "Certificate {} is untrusted because it resides in the rejected directory", + cert_file_name + ); return StatusCode::BadSecurityChecksFailed; } } @@ -234,7 +280,10 @@ impl CertificateStore { // Check the trusted folder let mut cert_path = self.trusted_certs_dir(); if !cert_path.exists() { - error!("Path for rejected certificates {} does not exist", cert_path.display()); + error!( + "Path for rejected certificates {} does not exist", + cert_path.display() + ); return StatusCode::BadUnexpectedError; } cert_path.push(&cert_file_name); @@ -246,7 +295,7 @@ impl CertificateStore { // Put the unknown cert into the trusted folder warn!("Certificate {} is unknown but policy will store it into the trusted directory", cert_file_name); let _ = self.store_trusted_cert(cert); - // Note that we drop through and still check the cert for validity + // Note that we drop through and still check the cert for validity } else { warn!("Certificate {} is unknown and untrusted so it will be stored in rejected directory", cert_file_name); let _ = self.store_rejected_cert(cert); @@ -268,7 +317,10 @@ impl CertificateStore { } Ok(key_length) => { if !security_policy.is_valid_keylength(key_length) { - warn!("Certificate {} has an invalid key length {} for the policy {}", cert_file_name, key_length, security_policy); + warn!( + "Certificate {} has an invalid key length {} for the policy {}", + cert_file_name, key_length, security_policy + ); return StatusCode::BadSecurityChecksFailed; } } @@ -280,7 +332,10 @@ impl CertificateStore { let now = Utc::now(); let status_code = cert.is_time_valid(&now); if status_code.is_bad() { - warn!("Certificate {} is not valid for now, check start/end timestamps", cert_file_name); + warn!( + "Certificate {} is not valid for now, check start/end timestamps", + cert_file_name + ); return status_code; } } @@ -289,7 +344,10 @@ impl CertificateStore { if let Some(hostname) = hostname { let status_code = cert.is_hostname_valid(hostname); if status_code.is_bad() { - warn!("Certificate {} does not have a valid hostname", cert_file_name); + warn!( + "Certificate {} does not have a valid hostname", + cert_file_name + ); return status_code; } } @@ -298,7 +356,10 @@ impl CertificateStore { if let Some(application_uri) = application_uri { let status_code = cert.is_application_uri_valid(application_uri); if status_code.is_bad() { - warn!("Certificate {} does not have a valid application uri", cert_file_name); + warn!( + "Certificate {} does not have a valid application uri", + cert_file_name + ); return status_code; } } @@ -360,9 
+421,8 @@ impl CertificateStore { Ok(()) } } else { - std::fs::create_dir_all(path).map_err(|_| { - format!("Cannot make directories for {}", path.display()) - }) + std::fs::create_dir_all(path) + .map_err(|_| format!("Cannot make directories for {}", path.display())) } } @@ -454,16 +514,22 @@ impl CertificateStore { let mut cert = Vec::new(); let bytes_read = file.read_to_end(&mut cert); if bytes_read.is_err() { - return Err(format!("Could not read bytes from cert file {}", path.display())); + return Err(format!( + "Could not read bytes from cert file {}", + path.display() + )); } let cert = match path.extension() { Some(v) if v == "der" => x509::X509::from_der(&cert), Some(v) if v == "pem" => x509::X509::from_pem(&cert), - _ => return Err(format!("Only .der and .pem certificates are supported")) + _ => return Err(format!("Only .der and .pem certificates are supported")), }; if cert.is_err() { - return Err(format!("Could not read cert from cert file {}", path.display())); + return Err(format!( + "Could not read cert from cert file {}", + path.display() + )); } Ok(X509::from(cert.unwrap())) @@ -483,11 +549,10 @@ impl CertificateStore { CertificateStore::ensure_dir(parent)?; } match File::create(file_path) { - Ok(mut file) => file.write(bytes) - .map_err(|_| { - format!("Could not write bytes to file {}", file_path.display()) - }), - Err(_) => Err(format!("Could not create file {}", file_path.display())) + Ok(mut file) => file + .write(bytes) + .map_err(|_| format!("Could not write bytes to file {}", file_path.display())), + Err(_) => Err(format!("Could not create file {}", file_path.display())), } } } diff --git a/crypto/src/hash.rs b/crypto/src/hash.rs index 6d4f17baa..35bc5caab 100644 --- a/crypto/src/hash.rs +++ b/crypto/src/hash.rs @@ -25,7 +25,12 @@ use crate::{SHA1_SIZE, SHA256_SIZE}; /// A(0) = seed /// A(n) = HMAC_SHA1(secret, A(n-1)) /// + indicates that the results are appended to previous results. 
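// A worked illustration (not part of the patch) of the construction described above: each
// round computes A(n) = HMAC(secret, A(n-1)) and appends HMAC(secret, A(n) + seed) to the
// output until `length` bytes are available. It is generic over the HMAC function so it
// stands alone; p_sha below implements the same construction with the OpenSSL digests.
fn p_hash_sketch<F>(hmac: F, secret: &[u8], seed: &[u8], length: usize) -> Vec<u8>
where
    F: Fn(&[u8], &[u8]) -> Vec<u8>,
{
    let mut out = Vec::with_capacity(length);
    let mut a = seed.to_vec(); // A(0) = seed
    while out.len() < length {
        a = hmac(secret, &a); // A(n) = HMAC(secret, A(n-1))
        let mut input = a.clone();
        input.extend_from_slice(seed); // A(n) + seed
        out.extend_from_slice(&hmac(secret, &input));
    }
    out.truncate(length);
    out
}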
-pub fn p_sha(message_digest: hash::MessageDigest, secret: &[u8], seed: &[u8], length: usize) -> Vec { +pub fn p_sha( + message_digest: hash::MessageDigest, + secret: &[u8], + seed: &[u8], + length: usize, +) -> Vec { let mut result = Vec::with_capacity(length); let mut hmac = Vec::with_capacity(seed.len() * 2); @@ -62,7 +67,12 @@ fn hmac_vec(digest: hash::MessageDigest, key: &[u8], data: &[u8]) -> Vec { signer.sign_to_vec().unwrap() } -fn hmac(digest: hash::MessageDigest, key: &[u8], data: &[u8], signature: &mut [u8]) -> Result<(), StatusCode> { +fn hmac( + digest: hash::MessageDigest, + key: &[u8], + data: &[u8], + signature: &mut [u8], +) -> Result<(), StatusCode> { let hmac = hmac_vec(digest, key, data); trace!("hmac length = {}", hmac.len()); signature.copy_from_slice(&hmac); @@ -73,7 +83,10 @@ pub fn hmac_sha1(key: &[u8], data: &[u8], signature: &mut [u8]) -> Result<(), St if signature.len() == SHA1_SIZE { hmac(hash::MessageDigest::sha1(), key, data, signature) } else { - error!("Signature buffer length must be exactly {} bytes to receive hmac_sha1 signature", SHA1_SIZE); + error!( + "Signature buffer length must be exactly {} bytes to receive hmac_sha1 signature", + SHA1_SIZE + ); Err(StatusCode::BadInvalidArgument) } } @@ -98,7 +111,10 @@ pub fn hmac_sha256(key: &[u8], data: &[u8], signature: &mut [u8]) -> Result<(), if signature.len() == SHA256_SIZE { hmac(hash::MessageDigest::sha256(), key, data, signature) } else { - error!("Signature buffer length must be exactly {} bytes to receive hmac_sha256 signature", SHA256_SIZE); + error!( + "Signature buffer length must be exactly {} bytes to receive hmac_sha256 signature", + SHA256_SIZE + ); Err(StatusCode::BadInvalidArgument) } } diff --git a/crypto/src/lib.rs b/crypto/src/lib.rs index b35363c3e..9a88be362 100644 --- a/crypto/src/lib.rs +++ b/crypto/src/lib.rs @@ -11,34 +11,24 @@ extern crate log; #[macro_use] extern crate serde_derive; +use opcua_types::{service_types::SignatureData, status_code::StatusCode, ByteString, UAString}; pub use { - aeskey::*, - certificate_store::*, - hash::*, - pkey::*, - security_policy::*, - thumbprint::*, - user_identity::*, - x509::*, -}; -use opcua_types::{ - ByteString, service_types::SignatureData, - status_code::StatusCode, - UAString, + aeskey::*, certificate_store::*, hash::*, pkey::*, security_policy::*, thumbprint::*, + user_identity::*, x509::*, }; #[cfg(test)] mod tests; -pub mod x509; pub mod aeskey; -pub mod pkey; -pub mod thumbprint; pub mod certificate_store; pub mod hash; +pub mod pkey; +pub mod random; pub mod security_policy; +pub mod thumbprint; pub mod user_identity; -pub mod random; +pub mod x509; // Size of a SHA1 hash value in bytes pub const SHA1_SIZE: usize = 20; @@ -95,8 +85,12 @@ fn concat_data_and_nonce(data: &[u8], nonce: &[u8]) -> Vec { } /// Creates a `SignatureData` object by signing the supplied certificate and nonce with a pkey -pub fn create_signature_data(signing_key: &PrivateKey, security_policy: SecurityPolicy, contained_cert: &ByteString, nonce: &ByteString) -> Result { - +pub fn create_signature_data( + signing_key: &PrivateKey, + security_policy: SecurityPolicy, + contained_cert: &ByteString, + nonce: &ByteString, +) -> Result { // TODO this function should be refactored to return an error if the contained cert or nonce is incorrect, not a blank signature. That // very much depends on reading the spec to see what should happen if its not possible to create a signature, e.g. because // policy is None. 
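// An illustrative sketch (not part of the patch) of how create_signature_data and
// verify_signature_data pair up. Only the signatures visible in this file are assumed; the
// parameter names and the chosen policy are illustrative and error handling is elided.
fn signature_roundtrip_sketch(
    my_key: &PrivateKey,
    my_cert: &X509,
    peer_cert_bytes: &ByteString,
    peer_cert: &X509,
    peer_nonce: &ByteString,
) -> StatusCode {
    let policy = SecurityPolicy::Basic256Sha256;
    // Sign the peer certificate bytes concatenated with the peer nonce using our private key.
    let signature = create_signature_data(my_key, policy, peer_cert_bytes, peer_nonce).unwrap();
    // The peer verifies with our certificate's public key against the same cert and nonce.
    verify_signature_data(&signature, policy, my_cert, peer_cert, peer_nonce.as_ref())
}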
@@ -107,11 +101,11 @@ pub fn create_signature_data(signing_key: &PrivateKey, security_policy: Security let data = concat_data_and_nonce(contained_cert.as_ref(), nonce.as_ref()); // Sign the bytes and return the algorithm, signature match security_policy { - SecurityPolicy::None => ( - UAString::null(), ByteString::null() - ), + SecurityPolicy::None => (UAString::null(), ByteString::null()), SecurityPolicy::Unknown => { - error!("An unknown security policy was passed to create_signature_data and rejected"); + error!( + "An unknown security policy was passed to create_signature_data and rejected" + ); (UAString::null(), ByteString::null()) } security_policy => { @@ -120,35 +114,54 @@ pub fn create_signature_data(signing_key: &PrivateKey, security_policy: Security let _ = security_policy.asymmetric_sign(signing_key, &data, &mut signature)?; ( UAString::from(security_policy.asymmetric_signature_algorithm()), - ByteString::from(&signature) + ByteString::from(&signature), ) } } }; - let signature_data = SignatureData { algorithm, signature }; + let signature_data = SignatureData { + algorithm, + signature, + }; trace!("Creating signature contained_cert = {:?}", signature_data); Ok(signature_data) } /// Verifies that the supplied signature data was produced by the signing cert. The contained cert and nonce are supplied so /// the signature can be verified against the expected data. -pub fn verify_signature_data(signature: &SignatureData, security_policy: SecurityPolicy, signing_cert: &X509, contained_cert: &X509, contained_nonce: &[u8]) -> StatusCode { +pub fn verify_signature_data( + signature: &SignatureData, + security_policy: SecurityPolicy, + signing_cert: &X509, + contained_cert: &X509, + contained_nonce: &[u8], +) -> StatusCode { if let Ok(verification_key) = signing_cert.public_key() { // This is the data that the should have been signed let contained_cert = contained_cert.as_byte_string(); let data = concat_data_and_nonce(contained_cert.as_ref(), contained_nonce); // Verify the signature - let result = security_policy.asymmetric_verify_signature(&verification_key, &data, signature.signature.as_ref(), None); + let result = security_policy.asymmetric_verify_signature( + &verification_key, + &data, + signature.signature.as_ref(), + None, + ); if result.is_ok() { StatusCode::Good } else { let result = result.unwrap_err(); - error!("Client signature verification failed, status code = {}", result); + error!( + "Client signature verification failed, status code = {}", + result + ); result } } else { - error!("Signature verification failed, signing certificate has no public key to verify with"); + error!( + "Signature verification failed, signing certificate has no public key to verify with" + ); StatusCode::BadUnexpectedError } } @@ -157,4 +170,4 @@ pub fn verify_signature_data(signature: &SignatureData, security_policy: Securit pub fn hostname() -> Result { use gethostname::gethostname; gethostname().into_string().map_err(|_| ()) -} \ No newline at end of file +} diff --git a/crypto/src/pkey.rs b/crypto/src/pkey.rs index 1d06ab0f4..75e367c71 100644 --- a/crypto/src/pkey.rs +++ b/crypto/src/pkey.rs @@ -56,7 +56,9 @@ impl Debug for PKey { pub trait KeySize { fn bit_length(&self) -> usize; - fn size(&self) -> usize { self.bit_length() / 8 } + fn size(&self) -> usize { + self.bit_length() / 8 + } fn calculate_cipher_text_size(&self, data_size: usize, padding: RsaPadding) -> usize { let plain_text_block_size = self.plain_text_block_size(padding); @@ -76,7 +78,7 @@ pub trait KeySize { 
RsaPadding::Pkcs1 => self.size() - 11, RsaPadding::OaepSha1 => self.size() - 42, RsaPadding::OaepSha256 => self.size() - 66, - _ => panic!("Unsupported padding") + _ => panic!("Unsupported padding"), } } @@ -115,22 +117,33 @@ impl PrivateKey { } pub fn private_key_to_pem(&self) -> Result, ()> { - self.value.private_key_to_pem_pkcs8() - .map_err(|_| { - error!("Cannot turn private key to PEM"); - }) + self.value.private_key_to_pem_pkcs8().map_err(|_| { + error!("Cannot turn private key to PEM"); + }) } /// Creates a message digest from the specified block of data and then signs it to return a signature - fn sign(&self, message_digest: hash::MessageDigest, data: &[u8], signature: &mut [u8], padding: RsaPadding) -> Result { + fn sign( + &self, + message_digest: hash::MessageDigest, + data: &[u8], + signature: &mut [u8], + padding: RsaPadding, + ) -> Result { trace!("RSA signing"); if let Ok(mut signer) = sign::Signer::new(message_digest, &self.value) { let _ = signer.set_rsa_padding(padding.into()); let _ = signer.set_rsa_pss_saltlen(RsaPssSaltlen::DIGEST_LENGTH); if signer.update(data).is_ok() { - return signer.sign_to_vec() + return signer + .sign_to_vec() .map(|result| { - trace!("Signature result, len {} = {:?}, copying to signature len {}", result.len(), result, signature.len()); + trace!( + "Signature result, len {} = {:?}, copying to signature len {}", + result.len(), + result, + signature.len() + ); signature.copy_from_slice(&result); result.len() }) @@ -145,22 +158,42 @@ impl PrivateKey { /// Signs the data using RSA-SHA1 pub fn sign_sha1(&self, data: &[u8], signature: &mut [u8]) -> Result { - self.sign(hash::MessageDigest::sha1(), data, signature, RsaPadding::Pkcs1) + self.sign( + hash::MessageDigest::sha1(), + data, + signature, + RsaPadding::Pkcs1, + ) } /// Signs the data using RSA-SHA256 pub fn sign_sha256(&self, data: &[u8], signature: &mut [u8]) -> Result { - self.sign(hash::MessageDigest::sha256(), data, signature, RsaPadding::Pkcs1) + self.sign( + hash::MessageDigest::sha256(), + data, + signature, + RsaPadding::Pkcs1, + ) } /// Signs the data using RSA-SHA256-PSS pub fn sign_sha256_pss(&self, data: &[u8], signature: &mut [u8]) -> Result { - self.sign(hash::MessageDigest::sha256(), data, signature, RsaPadding::Pkcs1Pss) + self.sign( + hash::MessageDigest::sha256(), + data, + signature, + RsaPadding::Pkcs1Pss, + ) } /// Decrypts data in src to dst using the specified padding and returning the size of the decrypted /// data in bytes or an error. 
- pub fn private_decrypt(&self, src: &[u8], dst: &mut [u8], padding: RsaPadding) -> Result { + pub fn private_decrypt( + &self, + src: &[u8], + dst: &mut [u8], + padding: RsaPadding, + ) -> Result { // decrypt data using our private key let cipher_text_block_size = self.cipher_text_block_size(); let rsa = self.value.rsa().unwrap(); @@ -205,13 +238,24 @@ impl PublicKey { } /// Verifies that the signature matches the hash / signing key of the supplied data - fn verify(&self, message_digest: hash::MessageDigest, data: &[u8], signature: &[u8], padding: RsaPadding) -> Result { - trace!("RSA verifying, against signature {:?}, len {}", signature, signature.len()); + fn verify( + &self, + message_digest: hash::MessageDigest, + data: &[u8], + signature: &[u8], + padding: RsaPadding, + ) -> Result { + trace!( + "RSA verifying, against signature {:?}, len {}", + signature, + signature.len() + ); if let Ok(mut verifier) = sign::Verifier::new(message_digest, &self.value) { let _ = verifier.set_rsa_padding(padding.into()); let _ = verifier.set_rsa_pss_saltlen(RsaPssSaltlen::DIGEST_LENGTH); if verifier.update(data).is_ok() { - return verifier.verify(signature) + return verifier + .verify(signature) .map(|result| { trace!("Key verified = {:?}", result); result @@ -227,22 +271,42 @@ impl PublicKey { /// Verifies the data using RSA-SHA1 pub fn verify_sha1(&self, data: &[u8], signature: &[u8]) -> Result { - self.verify(hash::MessageDigest::sha1(), data, signature, RsaPadding::Pkcs1) + self.verify( + hash::MessageDigest::sha1(), + data, + signature, + RsaPadding::Pkcs1, + ) } /// Verifies the data using RSA-SHA256 pub fn verify_sha256(&self, data: &[u8], signature: &[u8]) -> Result { - self.verify(hash::MessageDigest::sha256(), data, signature, RsaPadding::Pkcs1) + self.verify( + hash::MessageDigest::sha256(), + data, + signature, + RsaPadding::Pkcs1, + ) } /// Verifies the data using RSA-SHA256-PSS pub fn verify_sha256_pss(&self, data: &[u8], signature: &[u8]) -> Result { - self.verify(hash::MessageDigest::sha256(), data, signature, RsaPadding::Pkcs1Pss) + self.verify( + hash::MessageDigest::sha256(), + data, + signature, + RsaPadding::Pkcs1Pss, + ) } /// Encrypts data from src to dst using the specified padding and returns the size of encrypted /// data in bytes or an error. 
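// Worked numbers (not part of the patch) for the padding overheads in the KeySize trait
// above. The 2048-bit key size is an assumption chosen for illustration, matching the
// make_test_cert_2048 helper used elsewhere in this patch.
fn plain_text_block_size_example() {
    let key_size_bytes = 2048 / 8; // KeySize::size() = bit_length() / 8 = 256
    assert_eq!(key_size_bytes - 11, 245); // RsaPadding::Pkcs1
    assert_eq!(key_size_bytes - 42, 214); // RsaPadding::OaepSha1
    assert_eq!(key_size_bytes - 66, 190); // RsaPadding::OaepSha256
    // So each 256-byte cipher-text block carries at most 245, 214 or 190 plain-text bytes
    // respectively, which is the per-block budget the encrypt / decrypt helpers in this
    // file have to respect.
}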
- pub fn public_encrypt(&self, src: &[u8], dst: &mut [u8], padding: RsaPadding) -> Result { + pub fn public_encrypt( + &self, + src: &[u8], + dst: &mut [u8], + padding: RsaPadding, + ) -> Result { let cipher_text_block_size = self.cipher_text_block_size(); let plain_text_block_size = self.plain_text_block_size(padding); @@ -296,10 +360,14 @@ impl PublicKey { mod oaep_sha256 { use std::ptr; - use foreign_types::{ForeignType}; + use foreign_types::ForeignType; use libc::*; - use openssl::{error, pkey::{Private, Public}, rsa::{self, Rsa}}; - use openssl_sys::{*}; + use openssl::{ + error, + pkey::{Private, Public}, + rsa::{self, Rsa}, + }; + use openssl_sys::*; // This sets up the context for encrypting / decrypting with OAEP + SHA256 unsafe fn set_evp_ctrl_oaep_sha256(ctx: *mut EVP_PKEY_CTX) { @@ -308,11 +376,22 @@ mod oaep_sha256 { EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, md); // This is a hack because OpenSSL crate doesn't expose this const or a wrapper fn const EVP_PKEY_CTRL_RSA_OAEP_MD: c_int = EVP_PKEY_ALG_CTRL + 9; - EVP_PKEY_CTX_ctrl(ctx, EVP_PKEY_RSA, EVP_PKEY_OP_TYPE_CRYPT, EVP_PKEY_CTRL_RSA_OAEP_MD, 0, md as *mut c_void); + EVP_PKEY_CTX_ctrl( + ctx, + EVP_PKEY_RSA, + EVP_PKEY_OP_TYPE_CRYPT, + EVP_PKEY_CTRL_RSA_OAEP_MD, + 0, + md as *mut c_void, + ); } /// Special case implementation uses OAEP with SHA256 - pub fn decrypt(pkey: &Rsa, from: &[u8], to: &mut [u8]) -> Result { + pub fn decrypt( + pkey: &Rsa, + from: &[u8], + to: &mut [u8], + ) -> Result { let result; unsafe { let priv_key = EVP_PKEY_new(); @@ -326,11 +405,21 @@ mod oaep_sha256 { set_evp_ctrl_oaep_sha256(ctx); let mut out_len: size_t = to.len(); - let ret = EVP_PKEY_decrypt(ctx, to.as_mut_ptr(), &mut out_len, from.as_ptr(), from.len()); + let ret = EVP_PKEY_decrypt( + ctx, + to.as_mut_ptr(), + &mut out_len, + from.as_ptr(), + from.len(), + ); if ret > 0 && out_len > 0 { result = Ok(out_len as usize); } else { - trace!("oaep_sha256::decrypt EVP_PKEY_decrypt, ret = {}, out_len = {}", ret, out_len); + trace!( + "oaep_sha256::decrypt EVP_PKEY_decrypt, ret = {}, out_len = {}", + ret, + out_len + ); result = Err(error::ErrorStack::get()); } EVP_PKEY_CTX_free(ctx); @@ -339,7 +428,10 @@ mod oaep_sha256 { result = Err(error::ErrorStack::get()); } } else { - trace!("oaep_sha256::decrypt EVP_PKEY_new failed, err {}", ERR_get_error()); + trace!( + "oaep_sha256::decrypt EVP_PKEY_new failed, err {}", + ERR_get_error() + ); result = Err(error::ErrorStack::get()); } } @@ -348,7 +440,11 @@ mod oaep_sha256 { } /// Special case implementation uses OAEP with SHA256 - pub fn encrypt(pkey: &Rsa, from: &[u8], to: &mut [u8]) -> Result { + pub fn encrypt( + pkey: &Rsa, + from: &[u8], + to: &mut [u8], + ) -> Result { let result; unsafe { let pub_key = EVP_PKEY_new(); @@ -362,11 +458,21 @@ mod oaep_sha256 { set_evp_ctrl_oaep_sha256(ctx); let mut out_len: size_t = to.len(); - let ret = EVP_PKEY_encrypt(ctx, to.as_mut_ptr(), &mut out_len, from.as_ptr(), from.len()); + let ret = EVP_PKEY_encrypt( + ctx, + to.as_mut_ptr(), + &mut out_len, + from.as_ptr(), + from.len(), + ); if ret > 0 && out_len > 0 { result = Ok(out_len as usize); } else { - trace!("oaep_sha256::encrypt EVP_PKEY_encrypt, ret = {}, out_len = {}", ret, out_len); + trace!( + "oaep_sha256::encrypt EVP_PKEY_encrypt, ret = {}, out_len = {}", + ret, + out_len + ); result = Err(error::ErrorStack::get()); } EVP_PKEY_CTX_free(ctx); @@ -375,10 +481,13 @@ mod oaep_sha256 { result = Err(error::ErrorStack::get()); } } else { - trace!("oaep_sha256::encrypt EVP_PKEY_new failed, err {}", ERR_get_error()); + 
trace!( + "oaep_sha256::encrypt EVP_PKEY_new failed, err {}", + ERR_get_error() + ); result = Err(error::ErrorStack::get()); } } result } -} \ No newline at end of file +} diff --git a/crypto/src/security_policy.rs b/crypto/src/security_policy.rs index 773bafdc9..a3f50f5ea 100644 --- a/crypto/src/security_policy.rs +++ b/crypto/src/security_policy.rs @@ -9,19 +9,13 @@ use std::str::FromStr; use openssl::hash as openssl_hash; -use opcua_types::{ - ByteString, - constants, - status_code::StatusCode, -}; +use opcua_types::{constants, status_code::StatusCode, ByteString}; use crate::{ aeskey::AesKey, hash, pkey::{KeySize, PrivateKey, PublicKey, RsaPadding}, - random, - SHA1_SIZE, - SHA256_SIZE, + random, SHA1_SIZE, SHA256_SIZE, }; // These are constants that govern the different encryption / signing modes for OPC UA. In some @@ -46,7 +40,8 @@ mod aes_128_sha_256_rsa_oaep { use crate::algorithms::*; pub const SECURITY_POLICY: &str = "Aes128-Sha256-RsaOaep"; - pub const SECURITY_POLICY_URI: &str = "http://opcfoundation.org/UA/SecurityPolicy#Aes128_Sha256_RsaOaep"; + pub const SECURITY_POLICY_URI: &str = + "http://opcfoundation.org/UA/SecurityPolicy#Aes128_Sha256_RsaOaep"; pub const SYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_HMAC_SHA256; pub const ASYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_RSA_SHA256; @@ -73,7 +68,8 @@ mod aes_256_sha_256_rsa_pss { use crate::algorithms::*; pub const SECURITY_POLICY: &str = "Aes256-Sha256-RsaPss"; - pub const SECURITY_POLICY_URI: &str = "http://opcfoundation.org/UA/SecurityPolicy#Aes256_Sha256_RsaPss"; + pub const SECURITY_POLICY_URI: &str = + "http://opcfoundation.org/UA/SecurityPolicy#Aes256_Sha256_RsaPss"; pub const SYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_HMAC_SHA256; pub const ASYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_RSA_PSS_SHA2_256; @@ -100,7 +96,8 @@ mod basic_256_sha_256 { use crate::algorithms::*; pub const SECURITY_POLICY: &str = "Basic256Sha256"; - pub const SECURITY_POLICY_URI: &str = "http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256"; + pub const SECURITY_POLICY_URI: &str = + "http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256"; pub const SYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_HMAC_SHA256; pub const ASYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_RSA_SHA256; @@ -126,7 +123,8 @@ mod basic_128_rsa_15 { use crate::algorithms::*; pub const SECURITY_POLICY: &str = "Basic128Rsa15"; - pub const SECURITY_POLICY_URI: &str = "http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15"; + pub const SECURITY_POLICY_URI: &str = + "http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15"; pub const SYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_HMAC_SHA1; pub const ASYMMETRIC_SIGNATURE_ALGORITHM: &str = DSIG_RSA_SHA1; @@ -185,12 +183,20 @@ impl FromStr for SecurityPolicy { fn from_str(s: &str) -> Result { Ok(match s { - constants::SECURITY_POLICY_NONE | constants::SECURITY_POLICY_NONE_URI => SecurityPolicy::None, - basic_128_rsa_15::SECURITY_POLICY | basic_128_rsa_15::SECURITY_POLICY_URI => SecurityPolicy::Basic128Rsa15, + constants::SECURITY_POLICY_NONE | constants::SECURITY_POLICY_NONE_URI => { + SecurityPolicy::None + } + basic_128_rsa_15::SECURITY_POLICY | basic_128_rsa_15::SECURITY_POLICY_URI => { + SecurityPolicy::Basic128Rsa15 + } basic_256::SECURITY_POLICY | basic_256::SECURITY_POLICY_URI => SecurityPolicy::Basic256, - basic_256_sha_256::SECURITY_POLICY | basic_256_sha_256::SECURITY_POLICY_URI => SecurityPolicy::Basic256Sha256, - aes_128_sha_256_rsa_oaep::SECURITY_POLICY | aes_128_sha_256_rsa_oaep::SECURITY_POLICY_URI => 
SecurityPolicy::Aes128Sha256RsaOaep, - aes_256_sha_256_rsa_pss::SECURITY_POLICY | aes_256_sha_256_rsa_pss::SECURITY_POLICY_URI => SecurityPolicy::Aes256Sha256RsaPss, + basic_256_sha_256::SECURITY_POLICY | basic_256_sha_256::SECURITY_POLICY_URI => { + SecurityPolicy::Basic256Sha256 + } + aes_128_sha_256_rsa_oaep::SECURITY_POLICY + | aes_128_sha_256_rsa_oaep::SECURITY_POLICY_URI => SecurityPolicy::Aes128Sha256RsaOaep, + aes_256_sha_256_rsa_pss::SECURITY_POLICY + | aes_256_sha_256_rsa_pss::SECURITY_POLICY_URI => SecurityPolicy::Aes256Sha256RsaPss, _ => { error!("Specified security policy \"{}\" is not recognized", s); SecurityPolicy::Unknown @@ -223,9 +229,13 @@ impl SecurityPolicy { /// Returns true if the security policy is supported. It might be recognized but be unsupported by the implementation pub fn is_supported(&self) -> bool { match self { - SecurityPolicy::None | SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 | - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => true, - _ => false + SecurityPolicy::None + | SecurityPolicy::Basic128Rsa15 + | SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => true, + _ => false, } } @@ -234,7 +244,7 @@ impl SecurityPolicy { match self { // Since 1.04 because SHA-1 is no longer considered safe SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => true, - _ => false + _ => false, } } @@ -257,8 +267,12 @@ impl SecurityPolicy { SecurityPolicy::Basic128Rsa15 => basic_128_rsa_15::ASYMMETRIC_ENCRYPTION_ALGORITHM, SecurityPolicy::Basic256 => basic_256::ASYMMETRIC_ENCRYPTION_ALGORITHM, SecurityPolicy::Basic256Sha256 => basic_256_sha_256::ASYMMETRIC_ENCRYPTION_ALGORITHM, - SecurityPolicy::Aes128Sha256RsaOaep => aes_128_sha_256_rsa_oaep::ASYMMETRIC_ENCRYPTION_ALGORITHM, - SecurityPolicy::Aes256Sha256RsaPss => aes_256_sha_256_rsa_pss::ASYMMETRIC_ENCRYPTION_ALGORITHM, + SecurityPolicy::Aes128Sha256RsaOaep => { + aes_128_sha_256_rsa_oaep::ASYMMETRIC_ENCRYPTION_ALGORITHM + } + SecurityPolicy::Aes256Sha256RsaPss => { + aes_256_sha_256_rsa_pss::ASYMMETRIC_ENCRYPTION_ALGORITHM + } _ => { panic!("Invalid policy"); } @@ -270,8 +284,12 @@ impl SecurityPolicy { SecurityPolicy::Basic128Rsa15 => basic_128_rsa_15::ASYMMETRIC_SIGNATURE_ALGORITHM, SecurityPolicy::Basic256 => basic_256::ASYMMETRIC_SIGNATURE_ALGORITHM, SecurityPolicy::Basic256Sha256 => basic_256_sha_256::ASYMMETRIC_SIGNATURE_ALGORITHM, - SecurityPolicy::Aes128Sha256RsaOaep => aes_128_sha_256_rsa_oaep::ASYMMETRIC_SIGNATURE_ALGORITHM, - SecurityPolicy::Aes256Sha256RsaPss => aes_256_sha_256_rsa_pss::ASYMMETRIC_SIGNATURE_ALGORITHM, + SecurityPolicy::Aes128Sha256RsaOaep => { + aes_128_sha_256_rsa_oaep::ASYMMETRIC_SIGNATURE_ALGORITHM + } + SecurityPolicy::Aes256Sha256RsaPss => { + aes_256_sha_256_rsa_pss::ASYMMETRIC_SIGNATURE_ALGORITHM + } _ => { panic!("Invalid policy"); } @@ -283,8 +301,12 @@ impl SecurityPolicy { SecurityPolicy::Basic128Rsa15 => basic_128_rsa_15::SYMMETRIC_SIGNATURE_ALGORITHM, SecurityPolicy::Basic256 => basic_256::SYMMETRIC_SIGNATURE_ALGORITHM, SecurityPolicy::Basic256Sha256 => basic_256_sha_256::SYMMETRIC_SIGNATURE_ALGORITHM, - SecurityPolicy::Aes128Sha256RsaOaep => aes_128_sha_256_rsa_oaep::SYMMETRIC_SIGNATURE_ALGORITHM, - SecurityPolicy::Aes256Sha256RsaPss => aes_256_sha_256_rsa_pss::SYMMETRIC_SIGNATURE_ALGORITHM, + SecurityPolicy::Aes128Sha256RsaOaep => { + aes_128_sha_256_rsa_oaep::SYMMETRIC_SIGNATURE_ALGORITHM + } + 
SecurityPolicy::Aes256Sha256RsaPss => { + aes_256_sha_256_rsa_pss::SYMMETRIC_SIGNATURE_ALGORITHM + } _ => { panic!("Invalid policy"); } @@ -294,8 +316,11 @@ impl SecurityPolicy { // Plaintext block size in bytes pub fn plain_block_size(&self) -> usize { match self { - SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 | - SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => 16, + SecurityPolicy::Basic128Rsa15 + | SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => 16, _ => { panic!("Invalid policy"); } @@ -307,8 +332,9 @@ impl SecurityPolicy { match self { SecurityPolicy::None => 0, SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => SHA1_SIZE, - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep | - SecurityPolicy::Aes256Sha256RsaPss => SHA256_SIZE, + SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => SHA256_SIZE, _ => { panic!("Invalid policy"); } @@ -321,8 +347,12 @@ impl SecurityPolicy { SecurityPolicy::Basic128Rsa15 => basic_128_rsa_15::DERIVED_SIGNATURE_KEY_LENGTH, SecurityPolicy::Basic256 => basic_256::DERIVED_SIGNATURE_KEY_LENGTH, SecurityPolicy::Basic256Sha256 => basic_256_sha_256::DERIVED_SIGNATURE_KEY_LENGTH, - SecurityPolicy::Aes128Sha256RsaOaep => aes_128_sha_256_rsa_oaep::DERIVED_SIGNATURE_KEY_LENGTH, - SecurityPolicy::Aes256Sha256RsaPss => aes_256_sha_256_rsa_pss::DERIVED_SIGNATURE_KEY_LENGTH, + SecurityPolicy::Aes128Sha256RsaOaep => { + aes_128_sha_256_rsa_oaep::DERIVED_SIGNATURE_KEY_LENGTH + } + SecurityPolicy::Aes256Sha256RsaPss => { + aes_256_sha_256_rsa_pss::DERIVED_SIGNATURE_KEY_LENGTH + } _ => { panic!("Invalid policy"); } @@ -361,9 +391,11 @@ impl SecurityPolicy { pub fn secure_channel_nonce_length(&self) -> usize { match self { SecurityPolicy::Basic128Rsa15 => 16, - SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 | - SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => 32, - _ => panic!("") + SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => 32, + _ => panic!(""), } } @@ -376,7 +408,10 @@ impl SecurityPolicy { aes_128_sha_256_rsa_oaep::SECURITY_POLICY_URI => SecurityPolicy::Aes128Sha256RsaOaep, aes_256_sha_256_rsa_pss::SECURITY_POLICY_URI => SecurityPolicy::Aes256Sha256RsaPss, _ => { - error!("Specified security policy uri \"{}\" is not recognized", uri); + error!( + "Specified security policy uri \"{}\" is not recognized", + uri + ); SecurityPolicy::Unknown } } @@ -387,9 +422,12 @@ impl SecurityPolicy { fn prf(&self, secret: &[u8], seed: &[u8], length: usize, offset: usize) -> Vec { // P_SHA1 or P_SHA256 let message_digest = match self { - SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => openssl_hash::MessageDigest::sha1(), - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep | - SecurityPolicy::Aes256Sha256RsaPss => openssl_hash::MessageDigest::sha256(), + SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => { + openssl_hash::MessageDigest::sha1() + } + SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => openssl_hash::MessageDigest::sha256(), _ => { panic!("Invalid policy"); } @@ -433,12 +471,18 @@ impl SecurityPolicy { /// The Client keys are used to secure Messages sent by the Client. 
The Server keys /// are used to secure Messages sent by the Server. /// - pub fn make_secure_channel_keys(&self, secret: &[u8], seed: &[u8]) -> (Vec, AesKey, Vec) { + pub fn make_secure_channel_keys( + &self, + secret: &[u8], + seed: &[u8], + ) -> (Vec, AesKey, Vec) { // Work out the length of stuff let signing_key_length = self.derived_signature_key_size(); let (encrypting_key_length, encrypting_block_size) = match self { SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Aes128Sha256RsaOaep => (16, 16), - SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes256Sha256RsaPss => (32, 16), + SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes256Sha256RsaPss => (32, 16), _ => { panic!("Invalid policy"); } @@ -447,17 +491,31 @@ impl SecurityPolicy { let signing_key = self.prf(secret, seed, signing_key_length, 0); let encrypting_key = self.prf(secret, seed, encrypting_key_length, signing_key_length); let encrypting_key = AesKey::new(*self, &encrypting_key); - let iv = self.prf(secret, seed, encrypting_block_size, signing_key_length + encrypting_key_length); + let iv = self.prf( + secret, + seed, + encrypting_block_size, + signing_key_length + encrypting_key_length, + ); (signing_key, encrypting_key, iv) } /// Produce a signature of the data using an asymmetric key. Stores the signature in the supplied /// `signature` buffer. Returns the size of the signature within that buffer. - pub fn asymmetric_sign(&self, signing_key: &PrivateKey, data: &[u8], signature: &mut [u8]) -> Result { + pub fn asymmetric_sign( + &self, + signing_key: &PrivateKey, + data: &[u8], + signature: &mut [u8], + ) -> Result { let result = match self { - SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => signing_key.sign_sha1(data, signature)?, - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep => signing_key.sign_sha256(data, signature)?, + SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => { + signing_key.sign_sha1(data, signature)? + } + SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep => { + signing_key.sign_sha256(data, signature)? + } SecurityPolicy::Aes256Sha256RsaPss => signing_key.sign_sha256_pss(data, signature)?, _ => { panic!("Invalid policy"); @@ -469,12 +527,24 @@ impl SecurityPolicy { /// Verifies a signature of the data using an asymmetric key. In a debugging scenario, the /// signing key can also be supplied so that the supplied signature can be compared to a freshly /// generated signature. - pub fn asymmetric_verify_signature(&self, verification_key: &PublicKey, data: &[u8], signature: &[u8], their_private_key: Option) -> Result<(), StatusCode> { + pub fn asymmetric_verify_signature( + &self, + verification_key: &PublicKey, + data: &[u8], + signature: &[u8], + their_private_key: Option, + ) -> Result<(), StatusCode> { // Asymmetric verify signature against supplied certificate let result = match self { - SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => verification_key.verify_sha1(data, signature)?, - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep => verification_key.verify_sha256(data, signature)?, - SecurityPolicy::Aes256Sha256RsaPss => verification_key.verify_sha256_pss(data, signature)?, + SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => { + verification_key.verify_sha1(data, signature)? + } + SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep => { + verification_key.verify_sha256(data, signature)? 
+ } + SecurityPolicy::Aes256Sha256RsaPss => { + verification_key.verify_sha256_pss(data, signature)? + } _ => { panic!("Invalid policy"); } @@ -489,7 +559,10 @@ impl SecurityPolicy { // Calculate the signature using their key, see what we were expecting versus theirs let mut their_signature = vec![0u8; their_key.size()]; self.asymmetric_sign(&their_key, data, their_signature.as_mut_slice())?; - trace!("Using their_key, signature should be {:?}", &their_signature); + trace!( + "Using their_key, signature should be {:?}", + &their_signature + ); } Err(StatusCode::BadSecurityChecksFailed) } @@ -500,7 +573,9 @@ impl SecurityPolicy { pub fn asymmetric_encryption_padding(&self) -> RsaPadding { match self { SecurityPolicy::Basic128Rsa15 => RsaPadding::Pkcs1, - SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep => RsaPadding::OaepSha1, + SecurityPolicy::Basic256 + | SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep => RsaPadding::OaepSha1, // PSS uses OAEP-SHA256 for encryption, but PSS for signing SecurityPolicy::Aes256Sha256RsaPss => RsaPadding::OaepSha256, _ => { @@ -511,18 +586,30 @@ impl SecurityPolicy { /// Encrypts a message using the supplied encryption key, returns the encrypted size. Destination /// buffer must be large enough to hold encrypted bytes including any padding. - pub fn asymmetric_encrypt(&self, encryption_key: &PublicKey, src: &[u8], dst: &mut [u8]) -> Result { + pub fn asymmetric_encrypt( + &self, + encryption_key: &PublicKey, + src: &[u8], + dst: &mut [u8], + ) -> Result { let padding = self.asymmetric_encryption_padding(); - encryption_key.public_encrypt(src, dst, padding) + encryption_key + .public_encrypt(src, dst, padding) .map_err(|_| StatusCode::BadUnexpectedError) } /// Decrypts a message whose thumbprint matches the x509 cert and private key pair. /// /// Returns the number of decrypted bytes - pub fn asymmetric_decrypt(&self, decryption_key: &PrivateKey, src: &[u8], dst: &mut [u8]) -> Result { + pub fn asymmetric_decrypt( + &self, + decryption_key: &PrivateKey, + src: &[u8], + dst: &mut [u8], + ) -> Result { let padding = self.asymmetric_encryption_padding(); - decryption_key.private_decrypt(src, dst, padding) + decryption_key + .private_decrypt(src, dst, padding) .map_err(|_| { error!("Asymmetric decryption failed"); StatusCode::BadSecurityChecksFailed @@ -531,11 +618,24 @@ impl SecurityPolicy { /// Produce a signature of some data using the supplied symmetric key. Signing algorithm is determined /// by the security policy. Signature is stored in the supplied `signature` argument. 
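An aside on the key derivation earlier in this file: `make_secure_channel_keys` consumes the P_SHA output as three consecutive ranges, signing key, then encryption key, then IV, using the per-policy lengths. Below is a minimal, self-contained sketch of that layout; the helper name is hypothetical and the lengths shown are the Basic128Rsa15 values from the test expectations later in this diff, not the crate's API.

// Hypothetical illustration of the derived key layout: the pseudo-random
// output is consumed as signing key, then encryption key, then IV.
// Lengths assume Basic128Rsa15 (16-byte signing key, AES-128 key, 16-byte IV).
fn split_derived_keys(prf_output: &[u8]) -> (&[u8], &[u8], &[u8]) {
    let signing_key_len = 16; // derived signature key size for this policy
    let encrypting_key_len = 16; // AES-128 key
    let iv_len = 16; // AES block size

    let signing_key = &prf_output[..signing_key_len];
    let encrypting_key = &prf_output[signing_key_len..signing_key_len + encrypting_key_len];
    let iv = &prf_output
        [signing_key_len + encrypting_key_len..signing_key_len + encrypting_key_len + iv_len];
    (signing_key, encrypting_key, iv)
}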
- pub fn symmetric_sign(&self, key: &[u8], data: &[u8], signature: &mut [u8]) -> Result<(), StatusCode> { - trace!("Producing signature for {} bytes of data into signature of {} bytes", data.len(), signature.len()); + pub fn symmetric_sign( + &self, + key: &[u8], + data: &[u8], + signature: &mut [u8], + ) -> Result<(), StatusCode> { + trace!( + "Producing signature for {} bytes of data into signature of {} bytes", + data.len(), + signature.len() + ); match self { - SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => hash::hmac_sha1(key, data, signature), - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => hash::hmac_sha256(key, data, signature), + SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => { + hash::hmac_sha1(key, data, signature) + } + SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => hash::hmac_sha256(key, data, signature), _ => { panic!("Unsupported policy") } @@ -543,11 +643,20 @@ impl SecurityPolicy { } /// Verify the signature of a data block using the supplied symmetric key. - pub fn symmetric_verify_signature(&self, key: &[u8], data: &[u8], signature: &[u8]) -> Result { + pub fn symmetric_verify_signature( + &self, + key: &[u8], + data: &[u8], + signature: &[u8], + ) -> Result { // Verify the signature using SHA-1 / SHA-256 HMAC let verified = match self { - SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => hash::verify_hmac_sha1(key, data, signature), - SecurityPolicy::Basic256Sha256 | SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => hash::verify_hmac_sha256(key, data, signature), + SecurityPolicy::Basic128Rsa15 | SecurityPolicy::Basic256 => { + hash::verify_hmac_sha1(key, data, signature) + } + SecurityPolicy::Basic256Sha256 + | SecurityPolicy::Aes128Sha256RsaOaep + | SecurityPolicy::Aes256Sha256RsaPss => hash::verify_hmac_sha256(key, data, signature), _ => { panic!("Unsupported policy") } @@ -561,12 +670,24 @@ impl SecurityPolicy { } /// Encrypt the supplied data using the supplied key storing the result in the destination. - pub fn symmetric_encrypt(&self, key: &AesKey, iv: &[u8], src: &[u8], dst: &mut [u8]) -> Result { + pub fn symmetric_encrypt( + &self, + key: &AesKey, + iv: &[u8], + src: &[u8], + dst: &mut [u8], + ) -> Result { key.encrypt(src, iv, dst) } /// Decrypts the supplied data using the supplied key storing the result in the destination. - pub fn symmetric_decrypt(&self, key: &AesKey, iv: &[u8], src: &[u8], dst: &mut [u8]) -> Result { + pub fn symmetric_decrypt( + &self, + key: &AesKey, + iv: &[u8], + src: &[u8], + dst: &mut [u8], + ) -> Result { key.decrypt(src, iv, dst) } } diff --git a/crypto/src/tests/authentication.rs b/crypto/src/tests/authentication.rs index e75e079d7..42c462923 100644 --- a/crypto/src/tests/authentication.rs +++ b/crypto/src/tests/authentication.rs @@ -1,4 +1,7 @@ -use crate::{self as crypto, make_user_name_identity_token, SecurityPolicy, decrypt_user_identity_token_password, random}; +use crate::{ + self as crypto, decrypt_user_identity_token_password, make_user_name_identity_token, random, + SecurityPolicy, +}; use crate::tests::*; @@ -67,7 +70,15 @@ fn user_name_identity_token_encrypted() { // or the correct encryption to happen. 
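Before the numbered identity-token cases below, a brief aside on the symmetric signature helpers above: `symmetric_sign` and `symmetric_verify_signature` dispatch to HMAC-SHA1 or HMAC-SHA256 depending on the policy. The sketch below shows the standard openssl-crate pattern that the crate's `hash::hmac_sha256` / `verify_hmac_sha256` helpers are assumed to follow; the function names here are illustrative, not the crate's own.

use openssl::{error::ErrorStack, hash::MessageDigest, memcmp, pkey::PKey, sign::Signer};

// Illustrative HMAC-SHA256 sign/verify pair using the openssl crate directly.
fn hmac_sha256_sketch(key: &[u8], data: &[u8]) -> Result<Vec<u8>, ErrorStack> {
    let pkey = PKey::hmac(key)?;
    let mut signer = Signer::new(MessageDigest::sha256(), &pkey)?;
    signer.update(data)?;
    signer.sign_to_vec()
}

fn verify_hmac_sha256_sketch(key: &[u8], data: &[u8], signature: &[u8]) -> bool {
    match hmac_sha256_sketch(key, data) {
        // memcmp::eq is a constant-time comparison, which matters when comparing MACs;
        // it panics on mismatched lengths, hence the explicit length check first.
        Ok(expected) => expected.len() == signature.len() && memcmp::eq(&expected, signature),
        Err(_) => false,
    }
}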
// #1 This should be plaintext since channel security policy is none, token policy is empty - let token = make_user_name_identity_token(SecurityPolicy::None, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); + let token = make_user_name_identity_token( + SecurityPolicy::None, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); assert!(token.encryption_algorithm.is_null()); assert_eq!(token.password.as_ref(), password.as_bytes()); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); @@ -75,7 +86,15 @@ fn user_name_identity_token_encrypted() { // #2 This should be plaintext since channel security policy is none, token policy is none user_token_policy.security_policy_uri = UAString::from(SecurityPolicy::None.to_uri()); - let token = make_user_name_identity_token(SecurityPolicy::None, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); + let token = make_user_name_identity_token( + SecurityPolicy::None, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); assert!(token.encryption_algorithm.is_null()); assert_eq!(token.password.as_ref(), password.as_bytes()); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); @@ -83,7 +102,15 @@ fn user_name_identity_token_encrypted() { // #3 This should be Rsa15 since channel security policy is none, token policy is Rsa15 user_token_policy.security_policy_uri = UAString::from(SecurityPolicy::Basic128Rsa15.to_uri()); - let token = make_user_name_identity_token(SecurityPolicy::None, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); + let token = make_user_name_identity_token( + SecurityPolicy::None, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); assert!(token.encryption_algorithm.is_null()); assert_eq!(token.password.as_ref(), password.as_bytes()); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); @@ -91,29 +118,70 @@ fn user_name_identity_token_encrypted() { // #4 This should be Rsa-15 since channel security policy is Rsa15, token policy is empty user_token_policy.security_policy_uri = UAString::null(); - let token = make_user_name_identity_token(SecurityPolicy::Basic128Rsa15, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); - assert_eq!(token.encryption_algorithm.as_ref(), crypto::algorithms::ENC_RSA_15); + let token = make_user_name_identity_token( + SecurityPolicy::Basic128Rsa15, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); + assert_eq!( + token.encryption_algorithm.as_ref(), + crypto::algorithms::ENC_RSA_15 + ); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); assert_eq!(password, password1); // #5 This should be Rsa-OAEP since channel security policy is Rsa-15, token policy is Rsa-OAEP user_token_policy.security_policy_uri = UAString::from(SecurityPolicy::Basic256Sha256.to_uri()); - let token = make_user_name_identity_token(SecurityPolicy::Basic128Rsa15, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); - assert_eq!(token.encryption_algorithm.as_ref(), crypto::algorithms::ENC_RSA_OAEP); + let token = make_user_name_identity_token( + SecurityPolicy::Basic128Rsa15, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); + assert_eq!( + token.encryption_algorithm.as_ref(), + 
crypto::algorithms::ENC_RSA_OAEP + ); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); assert_eq!(password, password1); // #6 This should be Rsa-OAEP since channel security policy is Rsa-OAEP, token policy is Rsa-OAEP user_token_policy.security_policy_uri = UAString::from(SecurityPolicy::Basic256Sha256.to_uri()); - let token = make_user_name_identity_token(SecurityPolicy::Basic256Sha256, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); - assert_eq!(token.encryption_algorithm.as_ref(), crypto::algorithms::ENC_RSA_OAEP); + let token = make_user_name_identity_token( + SecurityPolicy::Basic256Sha256, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); + assert_eq!( + token.encryption_algorithm.as_ref(), + crypto::algorithms::ENC_RSA_OAEP + ); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); assert_eq!(password, password1); // #7 This should be None since channel security policy is Rsa-15, token policy is None user_token_policy.security_policy_uri = UAString::from(SecurityPolicy::None.to_uri()); - let token = make_user_name_identity_token(SecurityPolicy::Basic128Rsa15, &user_token_policy, nonce.as_ref(), &cert, "user1", &password).unwrap(); + let token = make_user_name_identity_token( + SecurityPolicy::Basic128Rsa15, + &user_token_policy, + nonce.as_ref(), + &cert, + "user1", + &password, + ) + .unwrap(); assert!(token.encryption_algorithm.is_empty()); let password1 = decrypt_user_identity_token_password(&token, nonce.as_ref(), &pkey).unwrap(); assert_eq!(password, password1); -} \ No newline at end of file +} diff --git a/crypto/src/tests/crypto.rs b/crypto/src/tests/crypto.rs index 6265091b2..1899ccb62 100644 --- a/crypto/src/tests/crypto.rs +++ b/crypto/src/tests/crypto.rs @@ -5,16 +5,19 @@ use std::io::Write; use opcua_types::status_code::StatusCode; +use crate::tests::{ + make_certificate_store, make_test_cert_1024, make_test_cert_2048, APPLICATION_HOSTNAME, + APPLICATION_URI, +}; use crate::{ - aeskey::AesKey, certificate_store::*, pkey::{KeySize, PrivateKey, RsaPadding}, + aeskey::AesKey, + certificate_store::*, + pkey::{KeySize, PrivateKey, RsaPadding}, random, - SecurityPolicy, - SHA1_SIZE, - SHA256_SIZE, user_identity::{legacy_password_decrypt, legacy_password_encrypt}, - x509::{X509, X509Data}, + x509::{X509Data, X509}, + SecurityPolicy, SHA1_SIZE, SHA256_SIZE, }; -use crate::tests::{APPLICATION_HOSTNAME, APPLICATION_URI, make_certificate_store, make_test_cert_1024, make_test_cert_2048}; #[test] fn aes_test() { @@ -33,7 +36,11 @@ fn aes_test() { let mut ciphertext = vec![0u8; buf_size]; let ciphertext = { - println!("Plaintext = {}, ciphertext = {}", plaintext.len(), ciphertext.len()); + println!( + "Plaintext = {}, ciphertext = {}", + plaintext.len(), + ciphertext.len() + ); let r = aes_key.encrypt(plaintext, &iv, &mut ciphertext); println!("result = {:?}", r); assert!(r.is_ok()); @@ -120,7 +127,12 @@ fn test_and_reject_application_instance_cert() { // Make an unrecognized cert let (cert, _) = make_test_cert_1024(); - let result = cert_store.validate_or_reject_application_instance_cert(&cert, SecurityPolicy::Basic128Rsa15, None, None); + let result = cert_store.validate_or_reject_application_instance_cert( + &cert, + SecurityPolicy::Basic128Rsa15, + None, + None, + ); assert!(result.is_bad()); drop(tmp_dir); @@ -144,7 +156,12 @@ fn test_and_trust_application_instance_cert() { } // Now validate the cert was stored properly - let 
result = cert_store.validate_or_reject_application_instance_cert(&cert, SecurityPolicy::Basic128Rsa15, None, None); + let result = cert_store.validate_or_reject_application_instance_cert( + &cert, + SecurityPolicy::Basic128Rsa15, + None, + None, + ); assert!(result.is_good()); drop(tmp_dir); @@ -169,24 +186,40 @@ fn test_and_reject_thumbprint_mismatch() { } // Now validate the cert was rejected because the thumbprint does not match the one on disk - let result = cert_store.validate_or_reject_application_instance_cert(&cert2, SecurityPolicy::Basic128Rsa15, None, None); + let result = cert_store.validate_or_reject_application_instance_cert( + &cert2, + SecurityPolicy::Basic128Rsa15, + None, + None, + ); assert!(result.is_bad()); drop(tmp_dir); } -fn test_asymmetric_encrypt_and_decrypt(cert: &X509, key: &PrivateKey, security_policy: SecurityPolicy, plaintext_size: usize) { - let plaintext = (0..plaintext_size).map(|i| (i % 256) as u8).collect::>(); +fn test_asymmetric_encrypt_and_decrypt( + cert: &X509, + key: &PrivateKey, + security_policy: SecurityPolicy, + plaintext_size: usize, +) { + let plaintext = (0..plaintext_size) + .map(|i| (i % 256) as u8) + .collect::>(); let mut ciphertext = vec![0u8; plaintext_size + 8192]; let mut plaintext2 = vec![0u8; plaintext_size + 8192]; println!("Encrypt with security policy {:?}", security_policy); println!("Encrypting data of length {}", plaintext_size); - let encrypted_size = security_policy.asymmetric_encrypt(&cert.public_key().unwrap(), &plaintext, &mut ciphertext).unwrap(); + let encrypted_size = security_policy + .asymmetric_encrypt(&cert.public_key().unwrap(), &plaintext, &mut ciphertext) + .unwrap(); println!("Encrypted size = {}", encrypted_size); println!("Decrypting cipher text back"); - let decrypted_size = security_policy.asymmetric_decrypt(key, &ciphertext[..encrypted_size], &mut plaintext2).unwrap(); + let decrypted_size = security_policy + .asymmetric_decrypt(key, &ciphertext[..encrypted_size], &mut plaintext2) + .unwrap(); println!("Decrypted size = {}", decrypted_size); assert_eq!(plaintext_size, decrypted_size); @@ -204,7 +237,7 @@ fn asymmetric_encrypt_and_decrypt() { SecurityPolicy::Basic256, SecurityPolicy::Basic256Sha256, SecurityPolicy::Aes128Sha256RsaOaep, - SecurityPolicy::Aes256Sha256RsaPss + SecurityPolicy::Aes256Sha256RsaPss, ] { for data_size in &[0, 1, 127, 128, 129, 255, 256, 257, 13001] { test_asymmetric_encrypt_and_decrypt(&cert, &key, *security_policy, *data_size); @@ -251,7 +284,11 @@ fn calculate_cipher_text_size2() { // The cipher text size function should report exactly the same value as the value returned // by encrypting bytes. This is especially important on boundary values. 
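The size calculation being checked in this test is straightforward once the padding overhead is known. A self-contained sketch follows; the function is hypothetical and assumes the usual PKCS#1 overheads (11 bytes for PKCS#1 v1.5, and 2 * hash length + 2 for OAEP, i.e. 42 bytes for OAEP-SHA1 and 66 bytes for OAEP-SHA256).

// Hypothetical sketch of the cipher text size calculation: each RSA block holds
// (key_size - overhead) plaintext bytes and produces key_size cipher bytes, so
// the total is a ceiling division times the key size in bytes.
fn cipher_text_size(plaintext_len: usize, key_size: usize, padding_overhead: usize) -> usize {
    let plain_block_size = key_size - padding_overhead;
    let blocks = (plaintext_len + plain_block_size - 1) / plain_block_size; // ceiling division
    blocks * key_size
}

// For example, a 2048-bit key (256 bytes) with OAEP-SHA1 holds 214 plaintext
// bytes per block, so 215 bytes of input need two blocks = 512 cipher bytes.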
- for padding in &[RsaPadding::Pkcs1, RsaPadding::OaepSha1, RsaPadding::OaepSha256] { + for padding in &[ + RsaPadding::Pkcs1, + RsaPadding::OaepSha1, + RsaPadding::OaepSha256, + ] { for src_len in 1..550 { let src = vec![127u8; src_len]; @@ -260,13 +297,18 @@ fn calculate_cipher_text_size2() { let mut dst = vec![0u8; expected_size]; let actual_size = public_key.public_encrypt(&src, &mut dst, *padding).unwrap(); if expected_size != actual_size { - println!("Expected size {} != actual size {} for src length {}", expected_size, actual_size, src_len); + println!( + "Expected size {} != actual size {} for src length {}", + expected_size, actual_size, src_len + ); assert_eq!(expected_size, actual_size); } // Decrypt to be sure the data is same as input let mut src2 = vec![0u8; expected_size]; - let src2_len = private_key.private_decrypt(&dst, &mut src2, *padding).unwrap(); + let src2_len = private_key + .private_decrypt(&dst, &mut src2, *padding) + .unwrap(); assert_eq!(src_len, src2_len); assert_eq!(&src[..], &src[..src2_len]); } @@ -287,7 +329,9 @@ fn sign_verify_sha1() { assert!(public_key.verify_sha1(msg, &signature).unwrap()); assert!(!public_key.verify_sha1(msg2, &signature).unwrap()); - assert!(!public_key.verify_sha1(msg, &signature[..signature.len() - 1]).unwrap()); + assert!(!public_key + .verify_sha1(msg, &signature[..signature.len() - 1]) + .unwrap()); signature[0] = !signature[0]; // bitwise not assert!(!public_key.verify_sha1(msg, &signature).unwrap()); } @@ -307,7 +351,9 @@ fn sign_verify_sha256() { assert!(public_key.verify_sha256(msg, &signature).unwrap()); assert!(!public_key.verify_sha256(msg2, &signature).unwrap()); - assert!(!public_key.verify_sha256(msg, &signature[..signature.len() - 1]).unwrap()); + assert!(!public_key + .verify_sha256(msg, &signature[..signature.len() - 1]) + .unwrap()); signature[0] = !signature[0]; // bitwise not assert!(!public_key.verify_sha256(msg, &signature).unwrap()); } @@ -327,7 +373,9 @@ fn sign_verify_sha256_pss() { assert!(public_key.verify_sha256_pss(msg, &signature).unwrap()); assert!(!public_key.verify_sha256_pss(msg2, &signature).unwrap()); - assert!(!public_key.verify_sha256_pss(msg, &signature[..signature.len() - 1]).unwrap()); + assert!(!public_key + .verify_sha256_pss(msg, &signature[..signature.len() - 1]) + .unwrap()); signature[0] = !signature[0]; // bitwise not assert!(!public_key.verify_sha256_pss(msg, &signature).unwrap()); } @@ -345,13 +393,17 @@ fn sign_hmac_sha1() { let mut signature = [0u8; SHA1_SIZE]; assert!(hash::hmac_sha1(key, data, &mut signature).is_ok()); - let expected = "fbdb1d1b18aa6c08324b7d64b71fb76370690e1d".from_hex().unwrap(); + let expected = "fbdb1d1b18aa6c08324b7d64b71fb76370690e1d" + .from_hex() + .unwrap(); assert_eq!(&signature, &expected[..]); let key = b"key"; let data = b"The quick brown fox jumps over the lazy dog"; assert!(hash::hmac_sha1(key, data, &mut signature).is_ok()); - let expected = "de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9".from_hex().unwrap(); + let expected = "de7c9b85b8b78aa6bc8a7a36f70a90701c9db4d9" + .from_hex() + .unwrap(); assert_eq!(&signature, &expected[..]); assert!(hash::verify_hmac_sha1(key, data, &expected)); @@ -371,13 +423,17 @@ fn sign_hmac_sha256() { let mut signature = [0u8; SHA256_SIZE]; assert!(hash::hmac_sha256(key, data, &mut signature).is_ok()); - let expected = "b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad".from_hex().unwrap(); + let expected = "b613679a0814d9ec772f95d778c35fc5ff1697c493715653c6c712144292c5ad" + .from_hex() + .unwrap(); 
assert_eq!(&signature, &expected[..]); let key = b"key"; let data = b"The quick brown fox jumps over the lazy dog"; assert!(hash::hmac_sha256(key, data, &mut signature).is_ok()); - let expected = "f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8".from_hex().unwrap(); + let expected = "f7bc83f430538424b13298e6aa6fb143ef4d59a14946175997479dbc2d1a3cd8" + .from_hex() + .unwrap(); assert_eq!(&signature, &expected[..]); assert!(hash::verify_hmac_sha256(key, data, &expected)); @@ -388,16 +444,30 @@ fn sign_hmac_sha256() { fn generate_nonce() { // Generate a random nonce through the function and ensure it is the expected length assert!(SecurityPolicy::None.random_nonce().is_null()); - assert_eq!(SecurityPolicy::Basic128Rsa15.random_nonce().as_ref().len(), 16); + assert_eq!( + SecurityPolicy::Basic128Rsa15.random_nonce().as_ref().len(), + 16 + ); assert_eq!(SecurityPolicy::Basic256.random_nonce().as_ref().len(), 32); - assert_eq!(SecurityPolicy::Basic256Sha256.random_nonce().as_ref().len(), 32); + assert_eq!( + SecurityPolicy::Basic256Sha256.random_nonce().as_ref().len(), + 32 + ); } #[test] fn derive_keys_from_nonce() { // Create a pair of "random" nonces. - let nonce1 = vec![0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f]; - let nonce2 = vec![0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f]; + let nonce1 = vec![ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, + 0x1e, 0x1f, + ]; + let nonce2 = vec![ + 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, + 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, + 0x3e, 0x3f, + ]; // Create a security policy Basic128Rsa15 policy // @@ -405,7 +475,8 @@ fn derive_keys_from_nonce() { // b) EncryptingKeyLength = 16 // c) EncryptingBlockSize = 16 let security_policy = SecurityPolicy::Basic128Rsa15; - let (signing_key, encryption_key, iv) = security_policy.make_secure_channel_keys(&nonce1, &nonce2); + let (signing_key, encryption_key, iv) = + security_policy.make_secure_channel_keys(&nonce1, &nonce2); assert_eq!(signing_key.len(), 16); assert_eq!(encryption_key.value().len(), 16); assert_eq!(iv.len(), 16); @@ -416,7 +487,8 @@ fn derive_keys_from_nonce() { // b) EncryptingKeyLength = 32 // c) EncryptingBlockSize = 16 let security_policy = SecurityPolicy::Basic256; - let (signing_key, encryption_key, iv) = security_policy.make_secure_channel_keys(&nonce1, &nonce2); + let (signing_key, encryption_key, iv) = + security_policy.make_secure_channel_keys(&nonce1, &nonce2); assert_eq!(signing_key.len(), 24); assert_eq!(encryption_key.value().len(), 32); assert_eq!(iv.len(), 16); @@ -427,7 +499,8 @@ fn derive_keys_from_nonce() { // b) EncryptingKeyLength = 32 // c) EncryptingBlockSize = 16 let security_policy = SecurityPolicy::Basic256Sha256; - let (signing_key, encryption_key, iv) = security_policy.make_secure_channel_keys(&nonce1, &nonce2); + let (signing_key, encryption_key, iv) = + security_policy.make_secure_channel_keys(&nonce1, &nonce2); assert_eq!(signing_key.len(), 32); assert_eq!(encryption_key.value().len(), 32); assert_eq!(iv.len(), 16); @@ -438,7 
+511,8 @@ fn derive_keys_from_nonce() { // b) EncryptingKeyLength = 32 // c) EncryptingBlockSize = 16 let security_policy = SecurityPolicy::Aes128Sha256RsaOaep; - let (signing_key, encryption_key, iv) = security_policy.make_secure_channel_keys(&nonce1, &nonce2); + let (signing_key, encryption_key, iv) = + security_policy.make_secure_channel_keys(&nonce1, &nonce2); assert_eq!(signing_key.len(), 32); assert_eq!(encryption_key.value().len(), 16); assert_eq!(iv.len(), 16); @@ -449,18 +523,42 @@ fn derive_keys_from_nonce_basic128rsa15() { let security_policy = SecurityPolicy::Basic128Rsa15; // This test takes two nonces generated from a real client / server session - let local_nonce = vec![0x88, 0x65, 0x13, 0xb6, 0xee, 0xad, 0x68, 0xa2, 0xcb, 0xa7, 0x29, 0x0f, 0x79, 0xb3, 0x84, 0xf3]; - let remote_nonce = vec![0x17, 0x0c, 0xe8, 0x68, 0x3e, 0xe6, 0xb3, 0x80, 0xb3, 0xf4, 0x67, 0x5c, 0x1e, 0xa2, 0xcc, 0xb1]; + let local_nonce = vec![ + 0x88, 0x65, 0x13, 0xb6, 0xee, 0xad, 0x68, 0xa2, 0xcb, 0xa7, 0x29, 0x0f, 0x79, 0xb3, 0x84, + 0xf3, + ]; + let remote_nonce = vec![ + 0x17, 0x0c, 0xe8, 0x68, 0x3e, 0xe6, 0xb3, 0x80, 0xb3, 0xf4, 0x67, 0x5c, 0x1e, 0xa2, 0xcc, + 0xb1, + ]; // Expected local keys - let local_signing_key: Vec = vec![0x66, 0x58, 0xa5, 0xa7, 0x8c, 0x7d, 0xa8, 0x4e, 0x57, 0xd3, 0x9b, 0x4d, 0x6b, 0xdc, 0x93, 0xad]; - let local_encrypting_key: Vec = vec![0x44, 0x8f, 0x0d, 0x7d, 0x2e, 0x08, 0x99, 0xdd, 0x5b, 0x56, 0x8d, 0xaf, 0x70, 0xc2, 0x26, 0xfc]; - let local_iv = vec![0x6c, 0x83, 0x7c, 0xd1, 0xa8, 0x61, 0xb9, 0xd7, 0xae, 0xdf, 0x2d, 0xe4, 0x85, 0x26, 0x81, 0x89]; + let local_signing_key: Vec = vec![ + 0x66, 0x58, 0xa5, 0xa7, 0x8c, 0x7d, 0xa8, 0x4e, 0x57, 0xd3, 0x9b, 0x4d, 0x6b, 0xdc, 0x93, + 0xad, + ]; + let local_encrypting_key: Vec = vec![ + 0x44, 0x8f, 0x0d, 0x7d, 0x2e, 0x08, 0x99, 0xdd, 0x5b, 0x56, 0x8d, 0xaf, 0x70, 0xc2, 0x26, + 0xfc, + ]; + let local_iv = vec![ + 0x6c, 0x83, 0x7c, 0xd1, 0xa8, 0x61, 0xb9, 0xd7, 0xae, 0xdf, 0x2d, 0xe4, 0x85, 0x26, 0x81, + 0x89, + ]; // Expected remote keys - let remote_signing_key: Vec = vec![0x27, 0x23, 0x92, 0xb7, 0x47, 0xad, 0x48, 0xf6, 0xae, 0x20, 0x30, 0x2f, 0x88, 0x4f, 0x96, 0x40]; - let remote_encrypting_key: Vec = vec![0x85, 0x84, 0x1c, 0xcc, 0xcb, 0x3c, 0x39, 0xd4, 0x14, 0x11, 0xa4, 0xfe, 0x01, 0x5a, 0x0a, 0xcf]; - let remote_iv = vec![0xab, 0xc6, 0x26, 0x78, 0xb9, 0xa4, 0xe6, 0x93, 0x21, 0x9e, 0xc1, 0x7e, 0xd5, 0x8b, 0x0e, 0xf2]; + let remote_signing_key: Vec = vec![ + 0x27, 0x23, 0x92, 0xb7, 0x47, 0xad, 0x48, 0xf6, 0xae, 0x20, 0x30, 0x2f, 0x88, 0x4f, 0x96, + 0x40, + ]; + let remote_encrypting_key: Vec = vec![ + 0x85, 0x84, 0x1c, 0xcc, 0xcb, 0x3c, 0x39, 0xd4, 0x14, 0x11, 0xa4, 0xfe, 0x01, 0x5a, 0x0a, + 0xcf, + ]; + let remote_iv = vec![ + 0xab, 0xc6, 0x26, 0x78, 0xb9, 0xa4, 0xe6, 0x93, 0x21, 0x9e, 0xc1, 0x7e, 0xd5, 0x8b, 0x0e, + 0xf2, + ]; // Make the keys using the two nonce values let local_keys = security_policy.make_secure_channel_keys(&remote_nonce, &local_nonce); @@ -497,7 +595,6 @@ fn certificate_with_hostname_mismatch() { assert_eq!(result, StatusCode::Good); } - #[test] fn certificate_with_application_uri_mismatch() { let (cert, _) = make_test_cert_2048(); @@ -511,7 +608,6 @@ fn certificate_with_application_uri_mismatch() { assert_eq!(result, StatusCode::Good); } - #[test] fn encrypt_decrypt_password() { let password = String::from("abcdef123456"); diff --git a/crypto/src/tests/mod.rs b/crypto/src/tests/mod.rs index 33b382e7a..f2ab1a5f4 100644 --- a/crypto/src/tests/mod.rs +++ b/crypto/src/tests/mod.rs @@ -3,7 +3,9 @@ use 
tempdir::TempDir; use opcua_types::*; use crate::{ - pkey::PrivateKey, x509::{X509, X509Data}, certificate_store::*, + certificate_store::*, + pkey::PrivateKey, + x509::{X509Data, X509}, }; const APPLICATION_URI: &str = "urn:testapplication"; @@ -24,17 +26,27 @@ fn make_test_cert(key_size: u32) -> (X509, PrivateKey) { organizational_unit: "x.org ops".to_string(), country: "EN".to_string(), state: "London".to_string(), - alt_host_names: vec![APPLICATION_URI.to_string(), "foo".to_string(), "foo2".to_string(), APPLICATION_HOSTNAME.to_string(), "foo3".to_string()], + alt_host_names: vec![ + APPLICATION_URI.to_string(), + "foo".to_string(), + "foo2".to_string(), + APPLICATION_HOSTNAME.to_string(), + "foo3".to_string(), + ], certificate_duration_days: 60, }; let cert = X509::cert_and_pkey(&args); cert.unwrap() } -fn make_test_cert_1024() -> (X509, PrivateKey) { make_test_cert(1024) } +fn make_test_cert_1024() -> (X509, PrivateKey) { + make_test_cert(1024) +} -fn make_test_cert_2048() -> (X509, PrivateKey) { make_test_cert(2048) } +fn make_test_cert_2048() -> (X509, PrivateKey) { + make_test_cert(2048) +} mod authentication; mod crypto; -mod security_policy; \ No newline at end of file +mod security_policy; diff --git a/crypto/src/tests/security_policy.rs b/crypto/src/tests/security_policy.rs index 7e0cb806b..faaf116dd 100644 --- a/crypto/src/tests/security_policy.rs +++ b/crypto/src/tests/security_policy.rs @@ -17,35 +17,110 @@ fn is_deprecated() { #[test] fn from_str() { // Invalid from_str - assert_eq!(SecurityPolicy::from_str("").unwrap(), SecurityPolicy::Unknown); - assert_eq!(SecurityPolicy::from_str("none").unwrap(), SecurityPolicy::Unknown); - assert_eq!(SecurityPolicy::from_str(" None").unwrap(), SecurityPolicy::Unknown); - assert_eq!(SecurityPolicy::from_str("Basic256 ").unwrap(), SecurityPolicy::Unknown); - assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#").unwrap(), SecurityPolicy::Unknown); + assert_eq!( + SecurityPolicy::from_str("").unwrap(), + SecurityPolicy::Unknown + ); + assert_eq!( + SecurityPolicy::from_str("none").unwrap(), + SecurityPolicy::Unknown + ); + assert_eq!( + SecurityPolicy::from_str(" None").unwrap(), + SecurityPolicy::Unknown + ); + assert_eq!( + SecurityPolicy::from_str("Basic256 ").unwrap(), + SecurityPolicy::Unknown + ); + assert_eq!( + SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#").unwrap(), + SecurityPolicy::Unknown + ); // Valid from str will take either the short name or the URI - assert_eq!(SecurityPolicy::from_str("None").unwrap(), SecurityPolicy::None); - assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#None").unwrap(), SecurityPolicy::None); - assert_eq!(SecurityPolicy::from_str("Basic128Rsa15").unwrap(), SecurityPolicy::Basic128Rsa15); - assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15").unwrap(), SecurityPolicy::Basic128Rsa15); - assert_eq!(SecurityPolicy::from_str("Basic256").unwrap(), SecurityPolicy::Basic256); - assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Basic256").unwrap(), SecurityPolicy::Basic256); - assert_eq!(SecurityPolicy::from_str("Basic256Sha256").unwrap(), SecurityPolicy::Basic256Sha256); - assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256").unwrap(), SecurityPolicy::Basic256Sha256); - assert_eq!(SecurityPolicy::from_str("Aes128-Sha256-RsaOaep").unwrap(), SecurityPolicy::Aes128Sha256RsaOaep); - 
assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Aes128_Sha256_RsaOaep").unwrap(), SecurityPolicy::Aes128Sha256RsaOaep); - assert_eq!(SecurityPolicy::from_str("Aes256-Sha256-RsaPss").unwrap(), SecurityPolicy::Aes256Sha256RsaPss); - assert_eq!(SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Aes256_Sha256_RsaPss").unwrap(), SecurityPolicy::Aes256Sha256RsaPss); + assert_eq!( + SecurityPolicy::from_str("None").unwrap(), + SecurityPolicy::None + ); + assert_eq!( + SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#None").unwrap(), + SecurityPolicy::None + ); + assert_eq!( + SecurityPolicy::from_str("Basic128Rsa15").unwrap(), + SecurityPolicy::Basic128Rsa15 + ); + assert_eq!( + SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15") + .unwrap(), + SecurityPolicy::Basic128Rsa15 + ); + assert_eq!( + SecurityPolicy::from_str("Basic256").unwrap(), + SecurityPolicy::Basic256 + ); + assert_eq!( + SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Basic256").unwrap(), + SecurityPolicy::Basic256 + ); + assert_eq!( + SecurityPolicy::from_str("Basic256Sha256").unwrap(), + SecurityPolicy::Basic256Sha256 + ); + assert_eq!( + SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256") + .unwrap(), + SecurityPolicy::Basic256Sha256 + ); + assert_eq!( + SecurityPolicy::from_str("Aes128-Sha256-RsaOaep").unwrap(), + SecurityPolicy::Aes128Sha256RsaOaep + ); + assert_eq!( + SecurityPolicy::from_str( + "http://opcfoundation.org/UA/SecurityPolicy#Aes128_Sha256_RsaOaep" + ) + .unwrap(), + SecurityPolicy::Aes128Sha256RsaOaep + ); + assert_eq!( + SecurityPolicy::from_str("Aes256-Sha256-RsaPss").unwrap(), + SecurityPolicy::Aes256Sha256RsaPss + ); + assert_eq!( + SecurityPolicy::from_str("http://opcfoundation.org/UA/SecurityPolicy#Aes256_Sha256_RsaPss") + .unwrap(), + SecurityPolicy::Aes256Sha256RsaPss + ); } #[test] fn to_uri() { - assert_eq!(SecurityPolicy::None.to_uri(), "http://opcfoundation.org/UA/SecurityPolicy#None"); - assert_eq!(SecurityPolicy::Basic128Rsa15.to_uri(), "http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15"); - assert_eq!(SecurityPolicy::Basic256.to_uri(), "http://opcfoundation.org/UA/SecurityPolicy#Basic256"); - assert_eq!(SecurityPolicy::Basic256Sha256.to_uri(), "http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256"); - assert_eq!(SecurityPolicy::Aes128Sha256RsaOaep.to_uri(), "http://opcfoundation.org/UA/SecurityPolicy#Aes128_Sha256_RsaOaep"); - assert_eq!(SecurityPolicy::Aes256Sha256RsaPss.to_uri(), "http://opcfoundation.org/UA/SecurityPolicy#Aes256_Sha256_RsaPss"); + assert_eq!( + SecurityPolicy::None.to_uri(), + "http://opcfoundation.org/UA/SecurityPolicy#None" + ); + assert_eq!( + SecurityPolicy::Basic128Rsa15.to_uri(), + "http://opcfoundation.org/UA/SecurityPolicy#Basic128Rsa15" + ); + assert_eq!( + SecurityPolicy::Basic256.to_uri(), + "http://opcfoundation.org/UA/SecurityPolicy#Basic256" + ); + assert_eq!( + SecurityPolicy::Basic256Sha256.to_uri(), + "http://opcfoundation.org/UA/SecurityPolicy#Basic256Sha256" + ); + assert_eq!( + SecurityPolicy::Aes128Sha256RsaOaep.to_uri(), + "http://opcfoundation.org/UA/SecurityPolicy#Aes128_Sha256_RsaOaep" + ); + assert_eq!( + SecurityPolicy::Aes256Sha256RsaPss.to_uri(), + "http://opcfoundation.org/UA/SecurityPolicy#Aes256_Sha256_RsaPss" + ); } #[test] @@ -74,4 +149,4 @@ fn is_valid_keylength() { assert!(SecurityPolicy::Aes256Sha256RsaPss.is_valid_keylength(4096)); 
assert!(!SecurityPolicy::Aes256Sha256RsaPss.is_valid_keylength(1024)); assert!(!SecurityPolicy::Aes256Sha256RsaPss.is_valid_keylength(8192)); -} \ No newline at end of file +} diff --git a/crypto/src/user_identity.rs b/crypto/src/user_identity.rs index fca67e53c..9f581f474 100644 --- a/crypto/src/user_identity.rs +++ b/crypto/src/user_identity.rs @@ -11,21 +11,29 @@ use std::io::{Cursor, Write}; use std::str::FromStr; use opcua_types::{ - ByteString, encoding::{read_u32, write_u32}, + encoding::{read_u32, write_u32}, service_types::{SignatureData, UserNameIdentityToken, UserTokenPolicy, X509IdentityToken}, status_code::StatusCode, - UAString, + ByteString, UAString, }; use super::{KeySize, PrivateKey, RsaPadding, SecurityPolicy, X509}; /// Create a filled in UserNameIdentityToken by using the supplied channel security policy, user token policy, nonce, cert, user name and password. -pub fn make_user_name_identity_token(channel_security_policy: SecurityPolicy, user_token_policy: &UserTokenPolicy, nonce: &[u8], cert: &Option, user: &str, pass: &str) -> Result { +pub fn make_user_name_identity_token( + channel_security_policy: SecurityPolicy, + user_token_policy: &UserTokenPolicy, + nonce: &[u8], + cert: &Option, + user: &str, + pass: &str, +) -> Result { // Create a user token security policy by looking at the uri it wants to use let token_security_policy = if user_token_policy.security_policy_uri.is_empty() { SecurityPolicy::None } else { - let security_policy = SecurityPolicy::from_str(user_token_policy.security_policy_uri.as_ref()).unwrap(); + let security_policy = + SecurityPolicy::from_str(user_token_policy.security_policy_uri.as_ref()).unwrap(); if security_policy != SecurityPolicy::Unknown { security_policy } else { @@ -35,7 +43,9 @@ pub fn make_user_name_identity_token(channel_security_policy: SecurityPolicy, us // Table 179 Opc Part 4 provides a table of which encryption algorithm to use let security_policy = if channel_security_policy == SecurityPolicy::None { - if user_token_policy.security_policy_uri.is_empty() || token_security_policy != SecurityPolicy::None { + if user_token_policy.security_policy_uri.is_empty() + || token_security_policy != SecurityPolicy::None + { SecurityPolicy::None } else { token_security_policy @@ -43,7 +53,9 @@ pub fn make_user_name_identity_token(channel_security_policy: SecurityPolicy, us } else { if user_token_policy.security_policy_uri.is_empty() { channel_security_policy - } else if token_security_policy != SecurityPolicy::None && channel_security_policy != token_security_policy { + } else if token_security_policy != SecurityPolicy::None + && channel_security_policy != token_security_policy + { token_security_policy } else if channel_security_policy == token_security_policy { token_security_policy @@ -68,8 +80,14 @@ pub fn make_user_name_identity_token(channel_security_policy: SecurityPolicy, us } security_policy => { // Create a password which is encrypted using the secure channel info and the user token policy for the endpoint - let password = legacy_password_encrypt(pass, nonce, cert.as_ref().unwrap(), security_policy.asymmetric_encryption_padding())?; - let encryption_algorithm = UAString::from(security_policy.asymmetric_encryption_algorithm()); + let password = legacy_password_encrypt( + pass, + nonce, + cert.as_ref().unwrap(), + security_policy.asymmetric_encryption_padding(), + )?; + let encryption_algorithm = + UAString::from(security_policy.asymmetric_encryption_algorithm()); (password, encryption_algorithm) } }; @@ -83,7 +101,11 @@ pub fn 
make_user_name_identity_token(channel_security_policy: SecurityPolicy, us } /// Decrypt the password inside of a user identity token. -pub fn decrypt_user_identity_token_password(user_identity_token: &UserNameIdentityToken, server_nonce: &[u8], server_key: &PrivateKey) -> Result { +pub fn decrypt_user_identity_token_password( + user_identity_token: &UserNameIdentityToken, + server_nonce: &[u8], + server_key: &PrivateKey, +) -> Result { if user_identity_token.encryption_algorithm.is_empty() { // Assumed to be UTF-8 plain text user_identity_token.plaintext_password() @@ -99,28 +121,41 @@ pub fn decrypt_user_identity_token_password(user_identity_token: &UserNameIdenti return Err(StatusCode::BadIdentityTokenInvalid); } }; - legacy_password_decrypt(&user_identity_token.password, server_nonce, server_key, padding) + legacy_password_decrypt( + &user_identity_token.password, + server_nonce, + server_key, + padding, + ) } } /// Encrypt a client side user's password using the server nonce and cert. This is described in table 176 /// OPC UA part 4. This function is prefixed "legacy" because 1.04 describes another way of encrypting passwords. -pub fn legacy_password_encrypt(password: &str, server_nonce: &[u8], server_cert: &X509, padding: RsaPadding) -> Result { +pub fn legacy_password_encrypt( + password: &str, + server_nonce: &[u8], + server_cert: &X509, + padding: RsaPadding, +) -> Result { // Message format is size, password, nonce let plaintext_size = 4 + password.len() + server_nonce.len(); let mut src = Cursor::new(vec![0u8; plaintext_size]); // Write the length of the data to be encrypted excluding the length itself) write_u32(&mut src, (plaintext_size - 4) as u32)?; - src.write(password.as_bytes()).map_err(|_| StatusCode::BadEncodingError)?; - src.write(server_nonce).map_err(|_| StatusCode::BadEncodingError)?; + src.write(password.as_bytes()) + .map_err(|_| StatusCode::BadEncodingError)?; + src.write(server_nonce) + .map_err(|_| StatusCode::BadEncodingError)?; // Encrypt the data with the public key from the server's certificate let public_key = server_cert.public_key()?; let cipher_size = public_key.calculate_cipher_text_size(plaintext_size, padding); let mut dst = vec![0u8; cipher_size]; - let actual_size = public_key.public_encrypt(&src.into_inner(), &mut dst, padding) + let actual_size = public_key + .public_encrypt(&src.into_inner(), &mut dst, padding) .map_err(|_| StatusCode::BadEncodingError)?; assert_eq!(actual_size, cipher_size); @@ -130,14 +165,20 @@ pub fn legacy_password_encrypt(password: &str, server_nonce: &[u8], server_cert: /// Decrypt the client's password using the server's nonce and private key. This function is prefixed /// "legacy" because 1.04 describes another way of encrypting passwords. 
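The secret format used by `legacy_password_encrypt` above (and undone by `legacy_password_decrypt` below) is a length prefix, the UTF-8 password, then the server nonce, all RSA-encrypted with the server certificate's public key. A minimal sketch of assembling that plaintext; the helper is hypothetical and assumes OPC UA's little-endian encoding for the u32 length field.

// Hypothetical sketch of the plaintext assembled before RSA encryption:
// [u32 LE length (excluding the length field itself)][password bytes][server nonce].
fn legacy_password_plaintext(password: &str, server_nonce: &[u8]) -> Vec<u8> {
    let body_len = (password.len() + server_nonce.len()) as u32;
    let mut plaintext = Vec::with_capacity(4 + body_len as usize);
    plaintext.extend_from_slice(&body_len.to_le_bytes()); // length field
    plaintext.extend_from_slice(password.as_bytes()); // the secret itself
    plaintext.extend_from_slice(server_nonce); // server nonce appended last
    plaintext
}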
-pub fn legacy_password_decrypt(secret: &ByteString, server_nonce: &[u8], server_key: &PrivateKey, padding: RsaPadding) -> Result { +pub fn legacy_password_decrypt( + secret: &ByteString, + server_nonce: &[u8], + server_key: &PrivateKey, + padding: RsaPadding, +) -> Result { if secret.is_null() { Err(StatusCode::BadDecodingError) } else { // Decrypt the message let src = secret.value.as_ref().unwrap(); let mut dst = vec![0u8; src.len()]; - let actual_size = server_key.private_decrypt(&src, &mut dst, padding) + let actual_size = server_key + .private_decrypt(&src, &mut dst, padding) .map_err(|_| StatusCode::BadEncodingError)?; let mut dst = Cursor::new(dst); @@ -162,7 +203,13 @@ pub fn legacy_password_decrypt(secret: &ByteString, server_nonce: &[u8], server_ } /// Verify that the X509 identity token supplied to a server contains a valid signature. -pub fn verify_x509_identity_token(token: &X509IdentityToken, user_token_signature: &SignatureData, security_policy: SecurityPolicy, server_cert: &X509, server_nonce: &[u8]) -> Result<(), StatusCode> { +pub fn verify_x509_identity_token( + token: &X509IdentityToken, + user_token_signature: &SignatureData, + security_policy: SecurityPolicy, + server_cert: &X509, + server_nonce: &[u8], +) -> Result<(), StatusCode> { // Since it is not obvious at all from the spec what the user token signature is supposed to be, I looked // at the internet for clues: // @@ -176,10 +223,16 @@ pub fn verify_x509_identity_token(token: &X509IdentityToken, user_token_signatur // if the spec actually said this. let signing_cert = super::x509::X509::from_byte_string(&token.certificate_data)?; - let result = super::verify_signature_data(user_token_signature, security_policy, &signing_cert, server_cert, server_nonce); + let result = super::verify_signature_data( + user_token_signature, + security_policy, + &signing_cert, + server_cert, + server_nonce, + ); if result.is_good() { Ok(()) } else { Err(result) } -} \ No newline at end of file +} diff --git a/crypto/src/x509.rs b/crypto/src/x509.rs index 829188d09..59224d4f9 100644 --- a/crypto/src/x509.rs +++ b/crypto/src/x509.rs @@ -21,7 +21,7 @@ use openssl::{ x509::{self, extension::*}, }; -use opcua_types::{ByteString, service_types::ApplicationDescription, status_code::StatusCode}; +use opcua_types::{service_types::ApplicationDescription, status_code::StatusCode, ByteString}; use crate::{ hostname, @@ -101,7 +101,12 @@ impl X509Data { } /// Creates a list of uri + DNS hostnames using the supplied arguments - pub fn alt_host_names(application_uri: &str, addresses: Option>, add_localhost: bool, add_computer_name: bool) -> Vec { + pub fn alt_host_names( + application_uri: &str, + addresses: Option>, + add_localhost: bool, + add_computer_name: bool, + ) -> Vec { // The first name is the application uri let mut result = vec![application_uri.to_string()]; @@ -183,14 +188,15 @@ impl X509 { /// hostnames / ip addresses that the host runs on. 
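Circling back to `legacy_password_decrypt` above: after the RSA decryption the buffer is parsed the same way it was built, and the trailing bytes can be checked against the nonce the server issued. The sketch below is a hypothetical inverse of the builder shown earlier; error handling is collapsed to `Option`, and the nonce comparison reflects the documented format rather than the crate's exact logic.

// Hypothetical parse of the decrypted secret: read the little-endian length,
// split the body into password and trailing nonce, and reject on mismatch.
fn parse_legacy_password(decrypted: &[u8], server_nonce: &[u8]) -> Option<String> {
    if decrypted.len() < 4 {
        return None;
    }
    let mut len_bytes = [0u8; 4];
    len_bytes.copy_from_slice(&decrypted[..4]);
    let len = u32::from_le_bytes(len_bytes) as usize;

    let body = decrypted.get(4..4 + len)?;
    if len < server_nonce.len() || &body[len - server_nonce.len()..] != server_nonce {
        return None; // body too short, or nonce does not match the one we sent
    }
    String::from_utf8(body[..len - server_nonce.len()].to_vec()).ok()
}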
pub fn cert_and_pkey(x509_data: &X509Data) -> Result<(Self, PrivateKey), String> { // Create a key pair - let rsa = Rsa::generate(x509_data.key_size) - .map_err(|err| { - format!("Cannot create key pair check error {} and key size {}", err.to_string(), x509_data.key_size) - })?; + let rsa = Rsa::generate(x509_data.key_size).map_err(|err| { + format!( + "Cannot create key pair check error {} and key size {}", + err.to_string(), + x509_data.key_size + ) + })?; let pkey = pkey::PKey::from_rsa(rsa) - .map_err(|err| { - format!("Cannot create key pair check error {}", err.to_string()) - })?; + .map_err(|err| format!("Cannot create key pair check error {}", err.to_string()))?; let pkey = PrivateKey::wrap_private_key(pkey); // Create an X509 cert to hold the public key @@ -206,11 +212,14 @@ impl X509 { let issuer_name = { let mut name = x509::X509NameBuilder::new().unwrap(); // Common name - name.append_entry_by_text("CN", &x509_data.common_name).unwrap(); + name.append_entry_by_text("CN", &x509_data.common_name) + .unwrap(); // Organization - name.append_entry_by_text("O", &x509_data.organization).unwrap(); + name.append_entry_by_text("O", &x509_data.organization) + .unwrap(); // Organizational Unit - name.append_entry_by_text("OU", &x509_data.organizational_unit).unwrap(); + name.append_entry_by_text("OU", &x509_data.organizational_unit) + .unwrap(); // Country name.append_entry_by_text("C", &x509_data.country).unwrap(); // State @@ -222,21 +231,28 @@ impl X509 { let _ = builder.set_issuer_name(&issuer_name); // For Application Instance Certificate specifies how cert may be used - let key_usage = KeyUsage::new(). - digital_signature(). - non_repudiation(). - key_encipherment(). - data_encipherment(). - key_cert_sign(). - build().unwrap(); + let key_usage = KeyUsage::new() + .digital_signature() + .non_repudiation() + .key_encipherment() + .data_encipherment() + .key_cert_sign() + .build() + .unwrap(); let _ = builder.append_extension(key_usage); - let extended_key_usage = ExtendedKeyUsage::new(). - client_auth(). 
- server_auth().build().unwrap(); + let extended_key_usage = ExtendedKeyUsage::new() + .client_auth() + .server_auth() + .build() + .unwrap(); let _ = builder.append_extension(extended_key_usage); - builder.set_not_before(&Asn1Time::days_from_now(0).unwrap()).unwrap(); - builder.set_not_after(&Asn1Time::days_from_now(x509_data.certificate_duration_days).unwrap()).unwrap(); + builder + .set_not_before(&Asn1Time::days_from_now(0).unwrap()) + .unwrap(); + builder + .set_not_after(&Asn1Time::days_from_now(x509_data.certificate_duration_days).unwrap()) + .unwrap(); builder.set_pubkey(&pkey.value).unwrap(); // Random serial number @@ -254,24 +270,30 @@ impl X509 { if !x509_data.alt_host_names.is_empty() { let subject_alternative_name = { let mut subject_alternative_name = SubjectAlternativeName::new(); - x509_data.alt_host_names.iter().enumerate().for_each(|(i, alt_host_name)| { - if !alt_host_name.is_empty() { - if i == 0 { - // The first entry is the application uri - subject_alternative_name.uri(alt_host_name); - } else if let Ok(_) = alt_host_name.parse::() { - // Treat this as an IPv4 address - subject_alternative_name.ip(alt_host_name); - } else if let Ok(_) = alt_host_name.parse::() { - // Treat this as an IPv6 address - subject_alternative_name.ip(alt_host_name); - } else { - // Treat this as a DNS entries - subject_alternative_name.dns(alt_host_name); + x509_data + .alt_host_names + .iter() + .enumerate() + .for_each(|(i, alt_host_name)| { + if !alt_host_name.is_empty() { + if i == 0 { + // The first entry is the application uri + subject_alternative_name.uri(alt_host_name); + } else if let Ok(_) = alt_host_name.parse::() { + // Treat this as an IPv4 address + subject_alternative_name.ip(alt_host_name); + } else if let Ok(_) = alt_host_name.parse::() { + // Treat this as an IPv6 address + subject_alternative_name.ip(alt_host_name); + } else { + // Treat this as a DNS entries + subject_alternative_name.dns(alt_host_name); + } } - } - }); - subject_alternative_name.build(&builder.x509v3_context(None, None)).unwrap() + }); + subject_alternative_name + .build(&builder.x509v3_context(None, None)) + .unwrap() }; builder.append_extension(subject_alternative_name).unwrap(); } @@ -301,7 +323,8 @@ impl X509 { } pub fn public_key(&self) -> Result { - self.value.public_key() + self.value + .public_key() .map(|pkey| PublicKey::wrap_public_key(pkey)) .map_err(|_| { error!("Cannot obtain public key from certificate"); @@ -335,7 +358,9 @@ impl X509 { // Produces a string such as "CN=foo/C=IE" pub fn subject_name(&self) -> String { use std::ops::Deref; - self.value.subject_name().entries() + self.value + .subject_name() + .entries() .map(|e| { let v = if let Ok(v) = e.data().as_utf8() { v.deref().to_string() @@ -389,25 +414,29 @@ impl X509 { fn subject_alt_names(&self) -> Option> { if let Some(ref alt_names) = self.value.subject_alt_names() { // Skip the application uri - let subject_alt_names = alt_names.iter().skip(1).map(|n| { - if let Some(dnsname) = n.dnsname() { - dnsname.to_string() - } else if let Some(ip) = n.ipaddress() { - if ip.len() == 4 { - let mut addr = [0u8; 4]; - addr[..].clone_from_slice(&ip); - Ipv4Addr::from(addr).to_string() - } else if ip.len() == 16 { - let mut addr = [0u8; 16]; - addr[..].clone_from_slice(&ip); - Ipv6Addr::from(addr).to_string() + let subject_alt_names = alt_names + .iter() + .skip(1) + .map(|n| { + if let Some(dnsname) = n.dnsname() { + dnsname.to_string() + } else if let Some(ip) = n.ipaddress() { + if ip.len() == 4 { + let mut addr = [0u8; 4]; + 
addr[..].clone_from_slice(&ip); + Ipv4Addr::from(addr).to_string() + } else if ip.len() == 16 { + let mut addr = [0u8; 16]; + addr[..].clone_from_slice(&ip); + Ipv6Addr::from(addr).to_string() + } else { + "".to_string() + } } else { "".to_string() } - } else { - "".to_string() - } - }).collect(); + }) + .collect(); Some(subject_alt_names) } else { None @@ -422,15 +451,22 @@ impl X509 { error!("Hostname is empty"); StatusCode::BadCertificateHostNameInvalid } else if let Some(subject_alt_names) = self.subject_alt_names() { - let found = subject_alt_names.iter().any(|n| { - n.eq_ignore_ascii_case(hostname) - }); + let found = subject_alt_names + .iter() + .any(|n| n.eq_ignore_ascii_case(hostname)); if found { info!("Certificate host name {} is good", hostname); StatusCode::Good } else { - let alt_names = subject_alt_names.iter().map(|n| n.as_ref()).collect::>().join(", "); - error!("Cannot find a matching hostname for input {}, alt names = {}", hostname, alt_names); + let alt_names = subject_alt_names + .iter() + .map(|n| n.as_ref()) + .collect::>() + .join(", "); + error!( + "Cannot find a matching hostname for input {}, alt names = {}", + hostname, alt_names + ); StatusCode::BadCertificateHostNameInvalid } } else { @@ -442,7 +478,10 @@ impl X509 { /// Tests if the supplied application uri matches the uri alt subject name entry on the cert pub fn is_application_uri_valid(&self, application_uri: &str) -> StatusCode { - trace!("is_application_uri_valid against {} on cert", application_uri); + trace!( + "is_application_uri_valid against {} on cert", + application_uri + ); // Expecting the first subject alternative name to be a uri that matches with the supplied // application uri if let Some(ref alt_names) = self.value.subject_alt_names() { @@ -452,7 +491,10 @@ impl X509 { info!("Certificate application uri {} is good", application_uri); StatusCode::Good } else { - error!("Cert application uri {} does not match supplied uri {}", cert_application_uri, application_uri); + error!( + "Cert application uri {} does not match supplied uri {}", + cert_application_uri, application_uri + ); StatusCode::BadCertificateUriInvalid } } else { @@ -477,7 +519,7 @@ impl X509 { /// /// The thumbprint might be used by the server / client for look-up purposes. 
pub fn thumbprint(&self) -> Thumbprint { - use openssl::hash::{MessageDigest, hash}; + use openssl::hash::{hash, MessageDigest}; let der = self.value.to_der().unwrap(); let digest = hash(MessageDigest::sha1(), &der).unwrap(); Thumbprint::new(&digest) @@ -510,9 +552,10 @@ impl X509 { } else { &date }; - Utc.datetime_from_str(date, "%b %d %H:%M:%S %Y").map_err(|e| { - error!("Cannot parse ASN1 date, err = {:?}", e); - }) + Utc.datetime_from_str(date, "%b %d %H:%M:%S %Y") + .map_err(|e| { + error!("Cannot parse ASN1 date, err = {:?}", e); + }) } } @@ -571,4 +614,3 @@ mod tests { }) } } - diff --git a/integration/src/harness.rs b/integration/src/harness.rs index 841f82d52..9d9956f37 100644 --- a/integration/src/harness.rs +++ b/integration/src/harness.rs @@ -1,8 +1,10 @@ use std::{ path::PathBuf, sync::{ - Arc, atomic::{AtomicUsize, Ordering}, mpsc, mpsc::channel, Mutex, - RwLock, + atomic::{AtomicUsize, Ordering}, + mpsc, + mpsc::channel, + Arc, Mutex, RwLock, }, thread, time, }; @@ -13,12 +15,7 @@ use log::*; use opcua_client::prelude::*; use opcua_console_logging; use opcua_core::{self, runtime_components}; -use opcua_server::{ - self, - builder::ServerBuilder, - config::ServerEndpoint, - prelude::*, -}; +use opcua_server::{self, builder::ServerBuilder, config::ServerEndpoint, prelude::*}; use crate::*; @@ -38,7 +35,11 @@ fn next_port_offset() -> u16 { pub fn hostname() -> String { // To avoid certificate trouble, use the computer's own name for the endpoint let mut names = opcua_crypto::X509Data::computer_hostnames(); - if names.is_empty() { "localhost".to_string() } else { names.remove(0) } + if names.is_empty() { + "localhost".to_string() + } else { + names.remove(0) + } } fn port_from_offset(port_offset: u16) -> u16 { @@ -50,7 +51,9 @@ pub fn endpoint_url(port: u16, path: &str) -> String { format!("opc.tcp://{}:{}{}", hostname(), port, path) } -fn v1_node_id() -> NodeId { NodeId::new(2, "v1") } +fn v1_node_id() -> NodeId { + NodeId::new(2, "v1") +} pub fn stress_node_id(idx: usize) -> NodeId { NodeId::new(2, format!("v{:04}", idx)) @@ -68,7 +71,10 @@ pub fn server_x509_token() -> ServerUserToken { } pub fn client_x509_token() -> IdentityToken { - IdentityToken::X509(PathBuf::from(USER_X509_CERTIFICATE_PATH), PathBuf::from(USER_X509_PRIVATE_KEY_PATH)) + IdentityToken::X509( + PathBuf::from(USER_X509_CERTIFICATE_PATH), + PathBuf::from(USER_X509_PRIVATE_KEY_PATH), + ) } pub fn client_user_token() -> IdentityToken { @@ -90,7 +96,7 @@ pub fn new_server(port: u16) -> Server { let user_token_ids = vec![ opcua_server::prelude::ANONYMOUS_USER_TOKEN_ID, sample_user_id, - x509_user_id + x509_user_id, ]; // Create an OPC UA server with sample configuration and default node set @@ -106,21 +112,95 @@ pub fn new_server(port: u16) -> Server { .user_token(x509_user_id, server_x509_token()) .endpoints( [ - ("none", endpoint_path, SecurityPolicy::None, MessageSecurityMode::None, &user_token_ids), - ("basic128rsa15_sign", endpoint_path, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::Sign, &user_token_ids), - ("basic128rsa15_sign_encrypt", endpoint_path, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ("basic256_sign", endpoint_path, SecurityPolicy::Basic256, MessageSecurityMode::Sign, &user_token_ids), - ("basic256_sign_encrypt", endpoint_path, SecurityPolicy::Basic256, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ("basic256sha256_sign", endpoint_path, SecurityPolicy::Basic256Sha256, MessageSecurityMode::Sign, &user_token_ids), - 
("basic256sha256_sign_encrypt", endpoint_path, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ("endpoint_aes128sha256rsaoaep_sign", endpoint_path, SecurityPolicy::Aes128Sha256RsaOaep, MessageSecurityMode::Sign, &user_token_ids), - ("endpoint_aes128sha256rsaoaep_sign_encrypt", endpoint_path, SecurityPolicy::Aes128Sha256RsaOaep, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ("endpoint_aes256sha256rsapss_sign", endpoint_path, SecurityPolicy::Aes256Sha256RsaPss, MessageSecurityMode::Sign, &user_token_ids), - ("endpoint_aes256sha256rsapss_sign_encrypt", endpoint_path, SecurityPolicy::Aes256Sha256RsaPss, MessageSecurityMode::SignAndEncrypt, &user_token_ids), - ].iter().map(|v| { - (v.0.to_string(), ServerEndpoint::from((v.1, v.2, v.3, &v.4[..]))) - }).collect()) - .server().unwrap(); + ( + "none", + endpoint_path, + SecurityPolicy::None, + MessageSecurityMode::None, + &user_token_ids, + ), + ( + "basic128rsa15_sign", + endpoint_path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + &user_token_ids, + ), + ( + "basic128rsa15_sign_encrypt", + endpoint_path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids, + ), + ( + "basic256_sign", + endpoint_path, + SecurityPolicy::Basic256, + MessageSecurityMode::Sign, + &user_token_ids, + ), + ( + "basic256_sign_encrypt", + endpoint_path, + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids, + ), + ( + "basic256sha256_sign", + endpoint_path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::Sign, + &user_token_ids, + ), + ( + "basic256sha256_sign_encrypt", + endpoint_path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids, + ), + ( + "endpoint_aes128sha256rsaoaep_sign", + endpoint_path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::Sign, + &user_token_ids, + ), + ( + "endpoint_aes128sha256rsaoaep_sign_encrypt", + endpoint_path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids, + ), + ( + "endpoint_aes256sha256rsapss_sign", + endpoint_path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::Sign, + &user_token_ids, + ), + ( + "endpoint_aes256sha256rsapss_sign_encrypt", + endpoint_path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::SignAndEncrypt, + &user_token_ids, + ), + ] + .iter() + .map(|v| { + ( + v.0.to_string(), + ServerEndpoint::from((v.1, v.2, v.3, &v.4[..])), + ) + }) + .collect(), + ) + .server() + .unwrap(); // Allow untrusted access to the server { @@ -144,18 +224,23 @@ pub fn new_server(port: u16) -> Server { // Add variables let _ = address_space.add_variables( vec![Variable::new(&v1_node, "v1", "v1", 0 as i32)], - &sample_folder_id); + &sample_folder_id, + ); // Register a getter for the variable if let Some(ref mut v) = address_space.find_variable_mut(v1_node.clone()) { - let getter = AttrFnGetter::new(move |_, _, _, _, _, _| -> Result, StatusCode> { - Ok(Some(DataValue::new_now(100))) - }); + let getter = AttrFnGetter::new( + move |_, _, _, _, _, _| -> Result, StatusCode> { + Ok(Some(DataValue::new_now(100))) + }, + ); v.set_value_getter(Arc::new(Mutex::new(getter))); } // Add a bunch of sequential vars too, similar to demo-server - let node_ids = (0..1000).map(|i| stress_node_id(i)).collect::>(); + let node_ids = (0..1000) + .map(|i| stress_node_id(i)) + .collect::>(); let folder_id = address_space .add_folder("Stress", "Stress", &NodeId::objects_folder_id()) .unwrap(); 
@@ -181,7 +266,8 @@ fn new_client(_port: u16) -> Client { .pki_dir("./pki-client") .create_sample_keypair(true) .trust_server_certs(true) - .client().unwrap() + .client() + .unwrap() } pub fn new_client_server(port: u16) -> (Client, Server) { @@ -203,7 +289,7 @@ pub enum ClientResponse { #[derive(Debug, Clone, Copy, PartialEq)] pub enum ServerCommand { - Quit + Quit, } #[derive(Debug, Clone, Copy, PartialEq)] @@ -213,9 +299,15 @@ pub enum ServerResponse { Finished(bool), } -pub fn perform_test(client: Client, server: Server, client_test: Option, server_test: ST) - where CT: FnOnce(mpsc::Receiver, Client) + Send + 'static, - ST: FnOnce(mpsc::Receiver, Server) + Send + 'static { +pub fn perform_test( + client: Client, + server: Server, + client_test: Option, + server_test: ST, +) where + CT: FnOnce(mpsc::Receiver, Client) + Send + 'static, + ST: FnOnce(mpsc::Receiver, Server) + Send + 'static, +{ opcua_console_logging::init(); // Spawn the CLIENT thread @@ -245,7 +337,10 @@ pub fn perform_test(client: Client, server: Server, client_test: Option< trace!("No client test"); true }; - info!("Client test has completed, sending ClientResponse::Finished({:?})", result); + info!( + "Client test has completed, sending ClientResponse::Finished({:?})", + result + ); let _ = tx_client_response.send(ClientResponse::Finished(result)); info!("Client thread has finished"); }); @@ -266,7 +361,10 @@ pub fn perform_test(client: Client, server: Server, client_test: Option< server_test(rx_server_command, server); let result = true; - info!("Server test has completed, sending ServerResponse::Finished({:?})", result); + info!( + "Server test has completed, sending ServerResponse::Finished({:?})", + result + ); let _ = tx_server_response.send(ServerResponse::Finished(result)); info!("Server thread has finished"); }); @@ -294,7 +392,11 @@ pub fn perform_test(client: Client, server: Server, client_test: Option< error!("Test timed out after {} ms", elapsed.num_milliseconds()); error!("Running components:\n {}", { let components = runtime_components!(); - components.iter().cloned().collect::>().join("\n ") + components + .iter() + .cloned() + .collect::>() + .join("\n ") }); panic!("Timeout"); @@ -355,17 +457,34 @@ pub fn perform_test(client: Client, server: Server, client_test: Option< info!("test complete") } -pub fn get_endpoints_client_test(server_url: &str, _identity_token: IdentityToken, _rx_client_command: mpsc::Receiver, client: Client) { +pub fn get_endpoints_client_test( + server_url: &str, + _identity_token: IdentityToken, + _rx_client_command: mpsc::Receiver, + client: Client, +) { let endpoints = client.get_server_endpoints_from_url(server_url).unwrap(); // Value should match number of expected endpoints assert_eq!(endpoints.len(), 11); } -pub fn regular_client_test(client_endpoint: T, identity_token: IdentityToken, _rx_client_command: mpsc::Receiver, mut client: Client) where T: Into { +pub fn regular_client_test( + client_endpoint: T, + identity_token: IdentityToken, + _rx_client_command: mpsc::Receiver, + mut client: Client, +) where + T: Into, +{ // Connect to the server let client_endpoint = client_endpoint.into(); - info!("Client will try to connect to endpoint {:?}", client_endpoint); - let session = client.connect_to_endpoint(client_endpoint, identity_token).unwrap(); + info!( + "Client will try to connect to endpoint {:?}", + client_endpoint + ); + let session = client + .connect_to_endpoint(client_endpoint, identity_token) + .unwrap(); let mut session = session.write().unwrap(); // Read the 
variable @@ -381,11 +500,23 @@ pub fn regular_client_test(client_endpoint: T, identity_token: IdentityToken, session.disconnect(); } -pub fn inactive_session_client_test(client_endpoint: T, identity_token: IdentityToken, _rx_client_command: mpsc::Receiver, mut client: Client) where T: Into { +pub fn inactive_session_client_test( + client_endpoint: T, + identity_token: IdentityToken, + _rx_client_command: mpsc::Receiver, + mut client: Client, +) where + T: Into, +{ // Connect to the server let client_endpoint = client_endpoint.into(); - info!("Client will try to connect to endpoint {:?}", client_endpoint); - let session = client.connect_to_endpoint(client_endpoint, identity_token).unwrap(); + info!( + "Client will try to connect to endpoint {:?}", + client_endpoint + ); + let session = client + .connect_to_endpoint(client_endpoint, identity_token) + .unwrap(); let mut session = session.write().unwrap(); // Read the variable and expect that to fail @@ -433,27 +564,59 @@ pub fn regular_server_test(rx_server_command: mpsc::Receiver, ser } } -pub fn connect_with_client_test(port: u16, client_test: CT) where CT: FnOnce(mpsc::Receiver, Client) + Send + 'static { +pub fn connect_with_client_test(port: u16, client_test: CT) +where + CT: FnOnce(mpsc::Receiver, Client) + Send + 'static, +{ let (client, server) = new_client_server(port); perform_test(client, server, Some(client_test), regular_server_test); } pub fn connect_with_get_endpoints(port: u16) { - connect_with_client_test(port, move |rx_client_command: mpsc::Receiver, client: Client| { - get_endpoints_client_test(&endpoint_url(port, "/"), IdentityToken::Anonymous, rx_client_command, client); - }); + connect_with_client_test( + port, + move |rx_client_command: mpsc::Receiver, client: Client| { + get_endpoints_client_test( + &endpoint_url(port, "/"), + IdentityToken::Anonymous, + rx_client_command, + client, + ); + }, + ); } -pub fn connect_with_invalid_active_session(port: u16, mut client_endpoint: EndpointDescription, identity_token: IdentityToken) { - client_endpoint.endpoint_url = UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); - connect_with_client_test(port, move |rx_client_command: mpsc::Receiver, client: Client| { - inactive_session_client_test(client_endpoint, identity_token, rx_client_command, client); - }); +pub fn connect_with_invalid_active_session( + port: u16, + mut client_endpoint: EndpointDescription, + identity_token: IdentityToken, +) { + client_endpoint.endpoint_url = + UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); + connect_with_client_test( + port, + move |rx_client_command: mpsc::Receiver, client: Client| { + inactive_session_client_test( + client_endpoint, + identity_token, + rx_client_command, + client, + ); + }, + ); } -pub fn connect_with(port: u16, mut client_endpoint: EndpointDescription, identity_token: IdentityToken) { - client_endpoint.endpoint_url = UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); - connect_with_client_test(port, move |rx_client_command: mpsc::Receiver, client: Client| { - regular_client_test(client_endpoint, identity_token, rx_client_command, client); - }); +pub fn connect_with( + port: u16, + mut client_endpoint: EndpointDescription, + identity_token: IdentityToken, +) { + client_endpoint.endpoint_url = + UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); + connect_with_client_test( + port, + move |rx_client_command: mpsc::Receiver, client: Client| { + regular_client_test(client_endpoint, 
identity_token, rx_client_command, client); + }, + ); } diff --git a/integration/src/main.rs b/integration/src/main.rs index 5573f8c2c..058128373 100644 --- a/integration/src/main.rs +++ b/integration/src/main.rs @@ -9,4 +9,4 @@ pub const CLIENT_X509_ID: &str = "x509"; mod tests; #[cfg(test)] -mod harness; \ No newline at end of file +mod harness; diff --git a/integration/src/tests.rs b/integration/src/tests.rs index a5dd1a48e..167d93b78 100644 --- a/integration/src/tests.rs +++ b/integration/src/tests.rs @@ -2,62 +2,111 @@ use chrono::Utc; use log::*; use opcua_client::prelude::*; use opcua_console_logging; -use opcua_server::{ - self, - prelude::*, -}; +use opcua_server::{self, prelude::*}; use std::{ - sync::{ - Arc, mpsc, mpsc::channel, - RwLock, - }, + sync::{mpsc, mpsc::channel, Arc, RwLock}, thread, }; use crate::harness::*; fn endpoint_none() -> EndpointDescription { - ("/", SecurityPolicy::None.to_str(), MessageSecurityMode::None).into() + ( + "/", + SecurityPolicy::None.to_str(), + MessageSecurityMode::None, + ) + .into() } fn endpoint_basic128rsa15_sign() -> EndpointDescription { - ("/", SecurityPolicy::Basic128Rsa15.to_str(), MessageSecurityMode::Sign).into() + ( + "/", + SecurityPolicy::Basic128Rsa15.to_str(), + MessageSecurityMode::Sign, + ) + .into() } fn endpoint_basic128rsa15_sign_encrypt() -> EndpointDescription { - ("/", SecurityPolicy::Basic128Rsa15.to_str(), MessageSecurityMode::SignAndEncrypt).into() + ( + "/", + SecurityPolicy::Basic128Rsa15.to_str(), + MessageSecurityMode::SignAndEncrypt, + ) + .into() } fn endpoint_basic256_sign() -> EndpointDescription { - ("/", SecurityPolicy::Basic256.to_str(), MessageSecurityMode::Sign).into() + ( + "/", + SecurityPolicy::Basic256.to_str(), + MessageSecurityMode::Sign, + ) + .into() } fn endpoint_basic256_sign_encrypt() -> EndpointDescription { - ("/", SecurityPolicy::Basic256.to_str(), MessageSecurityMode::SignAndEncrypt).into() + ( + "/", + SecurityPolicy::Basic256.to_str(), + MessageSecurityMode::SignAndEncrypt, + ) + .into() } fn endpoint_basic256sha256_sign() -> EndpointDescription { - ("/", SecurityPolicy::Basic256Sha256.to_str(), MessageSecurityMode::Sign).into() + ( + "/", + SecurityPolicy::Basic256Sha256.to_str(), + MessageSecurityMode::Sign, + ) + .into() } fn endpoint_basic256sha256_sign_encrypt() -> EndpointDescription { - ("/", SecurityPolicy::Basic256Sha256.to_str(), MessageSecurityMode::SignAndEncrypt).into() + ( + "/", + SecurityPolicy::Basic256Sha256.to_str(), + MessageSecurityMode::SignAndEncrypt, + ) + .into() } fn endpoint_aes128sha256rsaoaep_sign() -> EndpointDescription { - ("/", SecurityPolicy::Aes128Sha256RsaOaep.to_str(), MessageSecurityMode::Sign).into() + ( + "/", + SecurityPolicy::Aes128Sha256RsaOaep.to_str(), + MessageSecurityMode::Sign, + ) + .into() } fn endpoint_aes128sha256rsaoaep_sign_encrypt() -> EndpointDescription { - ("/", SecurityPolicy::Aes128Sha256RsaOaep.to_str(), MessageSecurityMode::SignAndEncrypt).into() + ( + "/", + SecurityPolicy::Aes128Sha256RsaOaep.to_str(), + MessageSecurityMode::SignAndEncrypt, + ) + .into() } fn endpoint_aes256sha256rsapss_sign() -> EndpointDescription { - ("/", SecurityPolicy::Aes256Sha256RsaPss.to_str(), MessageSecurityMode::Sign).into() + ( + "/", + SecurityPolicy::Aes256Sha256RsaPss.to_str(), + MessageSecurityMode::Sign, + ) + .into() } fn endpoint_aes256sha256rsapss_sign_encrypt() -> EndpointDescription { - ("/", SecurityPolicy::Aes256Sha256RsaPss.to_str(), MessageSecurityMode::SignAndEncrypt).into() + ( + "/", + 
SecurityPolicy::Aes256Sha256RsaPss.to_str(), + MessageSecurityMode::SignAndEncrypt, + ) + .into() } /// This is the most basic integration test starting the server on a thread, setting an abort flag @@ -96,7 +145,10 @@ fn server_abort() { let now = Utc::now(); let elapsed = now.signed_duration_since(start_time.clone()); if elapsed.num_milliseconds() > timeout { - panic!("Abort test timed out after {} ms", elapsed.num_milliseconds()); + panic!( + "Abort test timed out after {} ms", + elapsed.num_milliseconds() + ); } } } @@ -106,8 +158,8 @@ fn server_abort() { #[test] #[ignore] fn hello_timeout() { - use std::net::TcpStream; use std::io::Read; + use std::net::TcpStream; let port = next_port(); // For this test we want to set the hello timeout to a low value for the sake of speed. @@ -118,7 +170,9 @@ fn hello_timeout() { let client_test = move |_rx_client_command: mpsc::Receiver, _client: Client| { // Client will open a socket, and sit there waiting for the socket to close, which should happen in under the timeout_wait_duration - let timeout_wait_duration = std::time::Duration::from_secs(opcua_server::constants::DEFAULT_HELLO_TIMEOUT_SECONDS as u64 + 3); + let timeout_wait_duration = std::time::Duration::from_secs( + opcua_server::constants::DEFAULT_HELLO_TIMEOUT_SECONDS as u64 + 3, + ); let host = crate::harness::hostname(); let address = (host.as_ref(), port); @@ -138,7 +192,10 @@ fn hello_timeout() { match result { Ok(v) => { if v > 0 { - panic!("Hello timeout exceeded and socket is still open, result = {}", v) + panic!( + "Hello timeout exceeded and socket is still open, result = {}", + v + ) } else { // From debug!("Client got a read of 0 bytes on the socket, so treating by terminating with success"); @@ -146,7 +203,10 @@ fn hello_timeout() { } } Err(err) => { - debug!("Client got an error {:?} on the socket terminating successfully", err); + debug!( + "Client got an error {:?} on the socket terminating successfully", + err + ); break; } } @@ -179,7 +239,11 @@ fn connect_none() { #[ignore] fn connect_basic128rsa15_sign() { // Connect a session with Basic128Rsa and Sign - connect_with(next_port(), endpoint_basic128rsa15_sign(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_basic128rsa15_sign(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Basic128Rsa15 + SignEncrypt @@ -187,7 +251,11 @@ fn connect_basic128rsa15_sign() { #[ignore] fn connect_basic128rsa15_sign_and_encrypt() { // Connect a session with Basic128Rsa and SignAndEncrypt - connect_with(next_port(), endpoint_basic128rsa15_sign_encrypt(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_basic128rsa15_sign_encrypt(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Basic256 + Sign @@ -195,7 +263,11 @@ fn connect_basic128rsa15_sign_and_encrypt() { #[ignore] fn connect_basic256_sign() { // Connect a session with Basic256 and Sign - connect_with(next_port(), endpoint_basic256_sign(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_basic256_sign(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Basic256 + SignEncrypt @@ -203,7 +275,11 @@ fn connect_basic256_sign() { #[ignore] fn connect_basic256_sign_and_encrypt() { // Connect a session with Basic256 and SignAndEncrypt - connect_with(next_port(), endpoint_basic256_sign_encrypt(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_basic256_sign_encrypt(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Basic256Sha256 + Sign @@ 
-211,42 +287,66 @@ fn connect_basic256_sign_and_encrypt() { #[ignore] fn connect_basic256sha256_sign() { // Connect a session with Basic256Sha256 and Sign - connect_with(next_port(), endpoint_basic256sha256_sign(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_basic256sha256_sign(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Basic256Sha256 + SignEncrypt #[test] #[ignore] fn connect_basic256sha256_sign_and_encrypt() { - connect_with(next_port(), endpoint_basic256sha256_sign_encrypt(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_basic256sha256_sign_encrypt(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Aes128Sha256RsaOaep + Sign #[test] #[ignore] fn connect_aes128sha256rsaoaep_sign() { - connect_with(next_port(), endpoint_aes128sha256rsaoaep_sign(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_aes128sha256rsaoaep_sign(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Aes128Sha256RsaOaep + SignEncrypt #[test] #[ignore] fn connect_aes128sha256rsaoaep_sign_encrypt() { - connect_with(next_port(), endpoint_aes128sha256rsaoaep_sign_encrypt(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_aes128sha256rsaoaep_sign_encrypt(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Aes128Sha256RsaOaep + Sign #[test] #[ignore] fn connect_aes256sha256rsapss_sign() { - connect_with(next_port(), endpoint_aes256sha256rsapss_sign(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_aes256sha256rsapss_sign(), + IdentityToken::Anonymous, + ); } /// Connect to the server using Aes128Sha256RsaOaep + SignEncrypt #[test] #[ignore] fn connect_aes256sha256rsapss_sign_encrypt() { - connect_with(next_port(), endpoint_aes256sha256rsapss_sign_encrypt(), IdentityToken::Anonymous); + connect_with( + next_port(), + endpoint_aes256sha256rsapss_sign_encrypt(), + IdentityToken::Anonymous, + ); } /// Connect to the server user/pass @@ -254,21 +354,33 @@ fn connect_aes256sha256rsapss_sign_encrypt() { #[ignore] fn connect_basic128rsa15_with_username_password() { // Connect a session using username/password token - connect_with(next_port(), endpoint_basic128rsa15_sign_encrypt(), client_user_token()); + connect_with( + next_port(), + endpoint_basic128rsa15_sign_encrypt(), + client_user_token(), + ); } /// Connect a session using an invalid username/password token and expect it to fail #[test] #[ignore] fn connect_basic128rsa15_with_invalid_username_password() { - connect_with_invalid_active_session(next_port(), endpoint_basic128rsa15_sign_encrypt(), client_invalid_user_token()); + connect_with_invalid_active_session( + next_port(), + endpoint_basic128rsa15_sign_encrypt(), + client_invalid_user_token(), + ); } /// Connect a session using an X509 key and certificate #[test] #[ignore] fn connect_basic128rsa15_with_x509_token() { - connect_with(next_port(), endpoint_basic128rsa15_sign_encrypt(), client_x509_token()); + connect_with( + next_port(), + endpoint_basic128rsa15_sign_encrypt(), + client_x509_token(), + ); } /// Connect to a server, read a variable, write a value to the variable, read the variable to verify it changed @@ -279,50 +391,58 @@ fn read_write_read() { let port = next_port(); let identity_token = client_x509_token(); - client_endpoint.endpoint_url = UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); - connect_with_client_test(port, move |_rx_client_command: mpsc::Receiver, mut client: Client| { - 
info!("Client will try to connect to endpoint {:?}", client_endpoint); - let session = client.connect_to_endpoint(client_endpoint, identity_token).unwrap(); - - let node_id = stress_node_id(1); - - // Read the existing value - { - let mut session = session.write().unwrap(); - let results = session.read(&[ - node_id.clone().into() - ]).unwrap(); - let value = &results[0]; - debug!("value = {:?}", value); - assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(0)) - } + client_endpoint.endpoint_url = + UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); + connect_with_client_test( + port, + move |_rx_client_command: mpsc::Receiver, mut client: Client| { + info!( + "Client will try to connect to endpoint {:?}", + client_endpoint + ); + let session = client + .connect_to_endpoint(client_endpoint, identity_token) + .unwrap(); + + let node_id = stress_node_id(1); + + // Read the existing value + { + let mut session = session.write().unwrap(); + let results = session.read(&[node_id.clone().into()]).unwrap(); + let value = &results[0]; + debug!("value = {:?}", value); + assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(0)) + } - { - let mut session = session.write().unwrap(); - let results = session.write(&[WriteValue { - node_id: node_id.clone(), - attribute_id: AttributeId::Value as u32, - index_range: UAString::null(), - value: Variant::Int32(1).into(), - }]).unwrap().unwrap(); - let value = results[0]; - assert_eq!(value, StatusCode::Good); - } + { + let mut session = session.write().unwrap(); + let results = session + .write(&[WriteValue { + node_id: node_id.clone(), + attribute_id: AttributeId::Value as u32, + index_range: UAString::null(), + value: Variant::Int32(1).into(), + }]) + .unwrap() + .unwrap(); + let value = results[0]; + assert_eq!(value, StatusCode::Good); + } - { - let mut session = session.write().unwrap(); - let results = session.read(&[ - node_id.into() - ]).unwrap(); - let value = &results[0]; - assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(1)) - } + { + let mut session = session.write().unwrap(); + let results = session.read(&[node_id.into()]).unwrap(); + let value = &results[0]; + assert_eq!(*value.value.as_ref().unwrap(), Variant::Int32(1)) + } - { - let mut session = session.write().unwrap(); - session.disconnect(); - } - }); + { + let mut session = session.write().unwrap(); + session.disconnect(); + } + }, + ); } /// Connect with the server and attempt to subscribe and monitor 1000 variables @@ -333,27 +453,44 @@ fn subscribe_1000() { let port = next_port(); let identity_token = client_x509_token(); - client_endpoint.endpoint_url = UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); - connect_with_client_test(port, move |_rx_client_command: mpsc::Receiver, mut client: Client| { - info!("Client will try to connect to endpoint {:?}", client_endpoint); - let session = client.connect_to_endpoint(client_endpoint, identity_token).unwrap(); - let mut session = session.write().unwrap(); - - let start_time = Utc::now(); - - // Create subscription - let subscription_id = session.create_subscription(2000.0f64, 100, 100, 0, 0, true, DataChangeCallback::new(|_| { - panic!("This shouldn't be called"); - })).unwrap(); - - // NOTE: There is a default limit of 1000 items in arrays, so this list will go from 1 to 1000 inclusive + client_endpoint.endpoint_url = + UAString::from(endpoint_url(port, client_endpoint.endpoint_url.as_ref())); + connect_with_client_test( + port, + move |_rx_client_command: mpsc::Receiver, mut 
client: Client| { + info!( + "Client will try to connect to endpoint {:?}", + client_endpoint + ); + let session = client + .connect_to_endpoint(client_endpoint, identity_token) + .unwrap(); + let mut session = session.write().unwrap(); - // Create monitored items - the last one does not exist so expect that to fail - let items_to_create = (0..1000) - .map(|i| i + 1) // From v0001 to v1000 - .map(|i| (i, stress_node_id(i))) - .map(|(i, node_id)| { - MonitoredItemCreateRequest { + let start_time = Utc::now(); + + // Create subscription + let subscription_id = session + .create_subscription( + 2000.0f64, + 100, + 100, + 0, + 0, + true, + DataChangeCallback::new(|_| { + panic!("This shouldn't be called"); + }), + ) + .unwrap(); + + // NOTE: There is a default limit of 1000 items in arrays, so this list will go from 1 to 1000 inclusive + + // Create monitored items - the last one does not exist so expect that to fail + let items_to_create = (0..1000) + .map(|i| i + 1) // From v0001 to v1000 + .map(|i| (i, stress_node_id(i))) + .map(|(i, node_id)| MonitoredItemCreateRequest { item_to_monitor: node_id.into(), monitoring_mode: MonitoringMode::Reporting, requested_parameters: MonitoringParameters { @@ -363,25 +500,27 @@ fn subscribe_1000() { queue_size: 1, discard_oldest: true, }, + }) + .collect::>(); + + let elapsed = Utc::now() - start_time; + assert!(elapsed.num_milliseconds() < 500i64); + error!("Elapsed time = {}ms", elapsed.num_milliseconds()); + + let results = session + .create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create) + .unwrap(); + results.iter().enumerate().for_each(|(i, result)| { + if i == 999 { + // Last idx var does not exist so expect it to fail + error!("Checkout {}", result.status_code); + assert!(result.status_code.is_bad()); + } else { + assert!(result.status_code.is_good()); } - }) - .collect::>(); - - let elapsed = Utc::now() - start_time; - assert!(elapsed.num_milliseconds() < 500i64); - error!("Elapsed time = {}ms", elapsed.num_milliseconds()); - - let results = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create).unwrap(); - results.iter().enumerate().for_each(|(i, result)| { - if i == 999 { - // Last idx var does not exist so expect it to fail - error!("Checkout {}", result.status_code); - assert!(result.status_code.is_bad()); - } else { - assert!(result.status_code.is_good()); - } - }); + }); - session.disconnect(); - }); + session.disconnect(); + }, + ); } diff --git a/samples/chess-server/src/game.rs b/samples/chess-server/src/game.rs index adc3b9d81..5fd824f59 100644 --- a/samples/chess-server/src/game.rs +++ b/samples/chess-server/src/game.rs @@ -284,7 +284,16 @@ impl Game { pub fn as_fen(&self) -> String { let mut result = String::with_capacity(80); - let ranks = [Rank::R8, Rank::R7, Rank::R6, Rank::R5, Rank::R4, Rank::R3, Rank::R2, Rank::R1]; + let ranks = [ + Rank::R8, + Rank::R7, + Rank::R6, + Rank::R5, + Rank::R4, + Rank::R3, + Rank::R2, + Rank::R1, + ]; for r in ranks.iter() { result.push_str(&self.fen_rank(*r)); result.push(if *r != Rank::R1 { '/' } else { ' ' }); diff --git a/samples/chess-server/src/main.rs b/samples/chess-server/src/main.rs index 58fef33c4..77e5bdaf2 100644 --- a/samples/chess-server/src/main.rs +++ b/samples/chess-server/src/main.rs @@ -14,20 +14,20 @@ mod game; // These are squares on the board which will become variables with the same // name const BOARD_SQUARES: [&'static str; 64] = [ - "a8", "b8", "c8", "d8", "e8", "f8", "g8", "h8", - "a7", "b7", "c7", "d7", 
"e7", "f7", "g7", "h7", - "a6", "b6", "c6", "d6", "e6", "f6", "g6", "h6", - "a5", "b5", "c5", "d5", "e5", "f5", "g5", "h5", - "a4", "b4", "c4", "d4", "e4", "f4", "g4", "h4", - "a3", "b3", "c3", "d3", "e3", "f3", "g3", "h3", - "a2", "b2", "c2", "d2", "e2", "f2", "g2", "h2", - "a1", "b1", "c1", "d1", "e1", "f1", "g1", "h1", + "a8", "b8", "c8", "d8", "e8", "f8", "g8", "h8", "a7", "b7", "c7", "d7", "e7", "f7", "g7", "h7", + "a6", "b6", "c6", "d6", "e6", "f6", "g6", "h6", "a5", "b5", "c5", "d5", "e5", "f5", "g5", "h5", + "a4", "b4", "c4", "d4", "e4", "f4", "g4", "h4", "a3", "b3", "c3", "d3", "e3", "f3", "g3", "h3", + "a2", "b2", "c2", "d2", "e2", "f2", "g2", "h2", "a1", "b1", "c1", "d1", "e1", "f1", "g1", "h1", ]; fn default_engine_path() -> String { // This is the default chess engine that will be launched absent of one being passed on the // command line. - String::from(if cfg!(windows) { "stockfish_9_x32.exe" } else { "stockfish" }) + String::from(if cfg!(windows) { + "stockfish_9_x32.exe" + } else { + "stockfish" + }) } fn main() { @@ -47,7 +47,9 @@ fn main() { let ns = { let mut address_space = address_space.write().unwrap(); - let ns = address_space.register_namespace("urn:chess-server").unwrap(); + let ns = address_space + .register_namespace("urn:chess-server") + .unwrap(); let board_node_id = address_space .add_folder("Board", "Board", &NodeId::objects_folder_id()) @@ -93,11 +95,17 @@ fn main() { game.set_position(); let bestmove = game.bestmove().unwrap(); - // uci is a wonderfully terrible specification as evidenced by the way various chess engines + // uci is a wonderfully terrible specification as evidenced by the way various chess engines // return no-bestmove answers - let end_game = bestmove == "(none)" || bestmove == "a1a1" || bestmove == "NULL" || bestmove == "0000"; + let end_game = bestmove == "(none)" + || bestmove == "a1a1" + || bestmove == "NULL" + || bestmove == "0000"; if end_game || game.half_move_clock >= 50 { - println!("Resetting the game - best move = {}, half move clock = {}", bestmove, game.half_move_clock); + println!( + "Resetting the game - best move = {}, half move clock = {}", + bestmove, game.half_move_clock + ); // Reset the board game.reset(); } else { diff --git a/samples/demo-server/src/control.rs b/samples/demo-server/src/control.rs index 00751e169..98e9ae706 100644 --- a/samples/demo-server/src/control.rs +++ b/samples/demo-server/src/control.rs @@ -2,9 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use opcua_server::{ - prelude::*, -}; +use opcua_server::prelude::*; pub fn add_control_switches(server: &mut Server, ns: u16) { // The address space is guarded so obtain a lock to change it diff --git a/samples/demo-server/src/historical.rs b/samples/demo-server/src/historical.rs index dafcb207c..3b66b8f2a 100644 --- a/samples/demo-server/src/historical.rs +++ b/samples/demo-server/src/historical.rs @@ -20,7 +20,14 @@ pub struct DataProvider; pub struct EventProvider; impl HistoricalDataProvider for DataProvider { - fn read_raw_modified_details(&self, _address_space: Arc>, _request: ReadRawModifiedDetails, _timestamps_to_return: TimestampsToReturn, _release_continuation_points: bool, _nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + fn read_raw_modified_details( + &self, + _address_space: Arc>, + _request: ReadRawModifiedDetails, + _timestamps_to_return: TimestampsToReturn, + _release_continuation_points: bool, + _nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { println!("Overridden 
read_raw_modified_details"); Err(StatusCode::BadHistoryOperationUnsupported) } @@ -28,4 +35,4 @@ impl HistoricalDataProvider for DataProvider { impl HistoricalEventProvider for EventProvider { // -} \ No newline at end of file +} diff --git a/samples/demo-server/src/machine.rs b/samples/demo-server/src/machine.rs index 779ce3725..b2d0106ee 100644 --- a/samples/demo-server/src/machine.rs +++ b/samples/demo-server/src/machine.rs @@ -2,15 +2,15 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use std::sync::{Arc, atomic::{AtomicU16, AtomicU32, Ordering}}; +use std::sync::{ + atomic::{AtomicU16, AtomicU32, Ordering}, + Arc, +}; use chrono; use rand; -use opcua_server::{ - events::event::*, - prelude::*, -}; +use opcua_server::{events::event::*, prelude::*}; pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { let address_space = server.address_space(); @@ -27,26 +27,58 @@ pub fn add_machinery(server: &mut Server, ns: u16, raise_event: bool) { .unwrap(); // Create the machine events folder - let _ = address_space - .add_folder_with_id(&machine_events_folder_id(ns), "Events", "Events", &devices_folder_id); + let _ = address_space.add_folder_with_id( + &machine_events_folder_id(ns), + "Events", + "Events", + &devices_folder_id, + ); // Create an object representing a machine that cycles from 0 to 100. Each time it cycles it will create an event - let machine1_id = add_machine(&mut address_space, ns, devices_folder_id.clone(), "Machine 1", machine1_counter.clone()); - let machine2_id = add_machine(&mut address_space, ns, devices_folder_id, "Machine 2", machine2_counter.clone()); + let machine1_id = add_machine( + &mut address_space, + ns, + devices_folder_id.clone(), + "Machine 1", + machine1_counter.clone(), + ); + let machine2_id = add_machine( + &mut address_space, + ns, + devices_folder_id, + "Machine 2", + machine2_counter.clone(), + ); (machine1_id, machine2_id) }; // Increment counters server.add_polling_action(300, move || { let mut address_space = address_space.write().unwrap(); - increment_counter(&mut address_space, ns, machine1_counter.clone(), &machine1_id, raise_event); - increment_counter(&mut address_space, ns, machine2_counter.clone(), &machine2_id, raise_event); + increment_counter( + &mut address_space, + ns, + machine1_counter.clone(), + &machine1_id, + raise_event, + ); + increment_counter( + &mut address_space, + ns, + machine2_counter.clone(), + &machine2_id, + raise_event, + ); }); } -fn machine_type_id(ns: u16) -> NodeId { NodeId::new(ns, "MachineTypeId") } +fn machine_type_id(ns: u16) -> NodeId { + NodeId::new(ns, "MachineTypeId") +} -fn machine_events_folder_id(ns: u16) -> NodeId { NodeId::new(ns, "MachineEvents") } +fn machine_events_folder_id(ns: u16) -> NodeId { + NodeId::new(ns, "MachineEvents") +} fn add_machinery_model(address_space: &mut AddressSpace, ns: u16) { // Create a machine counter type derived from BaseObjectType @@ -68,13 +100,23 @@ fn add_machinery_model(address_space: &mut AddressSpace, ns: u16) { // Create a counter cycled event type let machine_cycled_event_type_id = MachineCycledEventType::event_type_id(ns); - ObjectTypeBuilder::new(&machine_cycled_event_type_id, "MachineCycledEventType", "MachineCycledEventType") - .is_abstract(false) - .subtype_of(ObjectTypeId::BaseEventType) - .insert(address_space); + ObjectTypeBuilder::new( + &machine_cycled_event_type_id, + "MachineCycledEventType", + "MachineCycledEventType", + ) + .is_abstract(false) + .subtype_of(ObjectTypeId::BaseEventType) + 
.insert(address_space); } -fn add_machine(address_space: &mut AddressSpace, ns: u16, folder_id: NodeId, name: &str, counter: Arc) -> NodeId { +fn add_machine( + address_space: &mut AddressSpace, + ns: u16, + folder_id: NodeId, + name: &str, + counter: Arc, +) -> NodeId { let machine_id = NodeId::new(ns, UAString::from(name)); // Create a machine. Since machines generate events, the event notifier says that it does. ObjectBuilder::new(&machine_id, name, name) @@ -88,17 +130,19 @@ fn add_machine(address_space: &mut AddressSpace, ns: u16, folder_id: NodeId, nam .property_of(machine_id.clone()) .data_type(DataTypeId::UInt16) .has_type_definition(VariableTypeId::PropertyType) - .value_getter(AttrFnGetter::new_boxed(move |_, _, _, _, _, _| -> Result, StatusCode> { - let value = counter.load(Ordering::Relaxed); - Ok(Some(DataValue::new_now(value))) - })) + .value_getter(AttrFnGetter::new_boxed( + move |_, _, _, _, _, _| -> Result, StatusCode> { + let value = counter.load(Ordering::Relaxed); + Ok(Some(DataValue::new_now(value))) + }, + )) .insert(address_space); machine_id } pub struct MachineCycledEventType { - base: BaseEventType + base: BaseEventType, } impl Event for MachineCycledEventType { @@ -124,29 +168,59 @@ lazy_static! { } impl MachineCycledEventType { - fn new(machine_name: &str, ns: u16, node_id: R, browse_name: S, display_name: T, parent_node: U, source_node: V, time: DateTime) -> Self - where R: Into, - S: Into, - T: Into, - U: Into, - V: Into { + fn new( + machine_name: &str, + ns: u16, + node_id: R, + browse_name: S, + display_name: T, + parent_node: U, + source_node: V, + time: DateTime, + ) -> Self + where + R: Into, + S: Into, + T: Into, + U: Into, + V: Into, + { let event_type_id = MachineCycledEventType::event_type_id(ns); let source_node: NodeId = source_node.into(); MachineCycledEventType { - base: BaseEventType::new(node_id, event_type_id, browse_name, display_name, parent_node, time) - .source_node(source_node.clone()) - .source_name(UAString::from(machine_name)) - .message(LocalizedText::from(format!("A machine cycled event from machine {}", source_node))) - .severity(rand::random::() % 999u16 + 1u16) + base: BaseEventType::new( + node_id, + event_type_id, + browse_name, + display_name, + parent_node, + time, + ) + .source_node(source_node.clone()) + .source_name(UAString::from(machine_name)) + .message(LocalizedText::from(format!( + "A machine cycled event from machine {}", + source_node + ))) + .severity(rand::random::() % 999u16 + 1u16), } } } -fn raise_machine_cycled_event(address_space: &mut AddressSpace, ns: u16, source_machine_id: &NodeId) { +fn raise_machine_cycled_event( + address_space: &mut AddressSpace, + ns: u16, + source_machine_id: &NodeId, +) { // Remove old events let now = chrono::Utc::now(); let happened_before = now - chrono::Duration::minutes(5); - purge_events(source_machine_id, MachineCycledEventType::event_type_id(ns), address_space, &happened_before); + purge_events( + source_machine_id, + MachineCycledEventType::event_type_id(ns), + address_space, + &happened_before, + ); let machine_name = if let Some(node) = address_space.find_node(source_machine_id) { format!("{}", node.as_node().display_name().text) @@ -159,13 +233,28 @@ fn raise_machine_cycled_event(address_space: &mut AddressSpace, ns: u16, source_ let event_id = MACHINE_CYCLED_EVENT_ID.fetch_add(1, Ordering::Relaxed); let event_name = format!("Event{}", event_id); let now = DateTime::now(); - let mut event = MachineCycledEventType::new(&machine_name, ns, &event_node_id, 
event_name.clone(), event_name, machine_events_folder_id(ns), source_machine_id, now); + let mut event = MachineCycledEventType::new( + &machine_name, + ns, + &event_node_id, + event_name.clone(), + event_name, + machine_events_folder_id(ns), + source_machine_id, + now, + ); // create an event object in a folder with the let _ = event.raise(address_space); } -fn increment_counter(address_space: &mut AddressSpace, ns: u16, machine_counter: Arc, machine_id: &NodeId, raise_event: bool) { +fn increment_counter( + address_space: &mut AddressSpace, + ns: u16, + machine_counter: Arc, + machine_id: &NodeId, + raise_event: bool, +) { let c = machine_counter.load(Ordering::Relaxed); let c = if c < 99 { c + 1 diff --git a/samples/demo-server/src/main.rs b/samples/demo-server/src/main.rs index 1ed0c12a8..d9adf864f 100644 --- a/samples/demo-server/src/main.rs +++ b/samples/demo-server/src/main.rs @@ -22,16 +22,13 @@ extern crate log; use std::path::PathBuf; -use opcua_server::{ - http, - prelude::*, -}; +use opcua_server::{http, prelude::*}; mod control; +mod historical; mod machine; mod methods; mod scalar; -mod historical; fn main() { // More powerful logging than a console logger @@ -86,5 +83,11 @@ fn start_http_server(server: &Server) { let connections = server.connections(); let metrics = server.server_metrics(); // The index.html is in a path relative to the working dir. - let _ = http::run_http_server("127.0.0.1:8585", "../../server/html", server_state, connections, metrics); + let _ = http::run_http_server( + "127.0.0.1:8585", + "../../server/html", + server_state, + connections, + metrics, + ); } diff --git a/samples/demo-server/src/methods.rs b/samples/demo-server/src/methods.rs index 65e54aa9f..fc016bcec 100644 --- a/samples/demo-server/src/methods.rs +++ b/samples/demo-server/src/methods.rs @@ -4,12 +4,7 @@ //! 
A sample method -use opcua_server::{ - address_space::method::MethodBuilder, - callbacks, - prelude::*, - session::Session, -}; +use opcua_server::{address_space::method::MethodBuilder, callbacks, prelude::*, session::Session}; pub fn add_methods(server: &mut Server, ns: u16) { let address_space = server.address_space(); @@ -32,9 +27,7 @@ pub fn add_methods(server: &mut Server, ns: u16) { let fn_node_id = NodeId::new(ns, "HelloWorld"); MethodBuilder::new(&fn_node_id, "HelloWorld", "HelloWorld") .component_of(object_id.clone()) - .output_args(&mut address_space, &[ - ("Result", DataTypeId::String).into() - ]) + .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) .callback(Box::new(HelloWorld)) .insert(&mut address_space); @@ -42,12 +35,11 @@ pub fn add_methods(server: &mut Server, ns: u16) { let fn_node_id = NodeId::new(ns, "HelloX"); MethodBuilder::new(&fn_node_id, "HelloX", "HelloX") .component_of(object_id.clone()) - .input_args(&mut address_space, &[ - ("YourName", DataTypeId::String).into() - ]) - .output_args(&mut address_space, &[ - ("Result", DataTypeId::String).into() - ]) + .input_args( + &mut address_space, + &[("YourName", DataTypeId::String).into()], + ) + .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) .callback(Box::new(HelloX)) .insert(&mut address_space); @@ -55,9 +47,7 @@ pub fn add_methods(server: &mut Server, ns: u16) { let fn_node_id = NodeId::new(ns, "Boop"); MethodBuilder::new(&fn_node_id, "Boop", "Boop") .component_of(object_id.clone()) - .input_args(&mut address_space, &[ - ("Ping", DataTypeId::String).into() - ]) + .input_args(&mut address_space, &[("Ping", DataTypeId::String).into()]) .callback(Box::new(HelloX)) .insert(&mut address_space); } @@ -65,7 +55,11 @@ pub fn add_methods(server: &mut Server, ns: u16) { struct NoOp; impl callbacks::Method for NoOp { - fn call(&mut self, _session: &mut Session, _request: &CallMethodRequest) -> Result { + fn call( + &mut self, + _session: &mut Session, + _request: &CallMethodRequest, + ) -> Result { debug!("NoOp method called"); Ok(CallMethodResult { status_code: StatusCode::Good, @@ -79,7 +73,11 @@ impl callbacks::Method for NoOp { struct Boop; impl callbacks::Method for Boop { - fn call(&mut self, _session: &mut Session, request: &CallMethodRequest) -> Result { + fn call( + &mut self, + _session: &mut Session, + request: &CallMethodRequest, + ) -> Result { // Validate input to be a string debug!("Boop method called"); let in1_status = if let Some(ref input_arguments) = request.input_arguments { @@ -99,7 +97,11 @@ impl callbacks::Method for Boop { return Err(StatusCode::BadArgumentsMissing); }; - let status_code = if in1_status.is_good() { StatusCode::Good } else { StatusCode::BadInvalidArgument }; + let status_code = if in1_status.is_good() { + StatusCode::Good + } else { + StatusCode::BadInvalidArgument + }; Ok(CallMethodResult { status_code, @@ -113,7 +115,11 @@ impl callbacks::Method for Boop { struct HelloWorld; impl callbacks::Method for HelloWorld { - fn call(&mut self, _session: &mut Session, _request: &CallMethodRequest) -> Result { + fn call( + &mut self, + _session: &mut Session, + _request: &CallMethodRequest, + ) -> Result { debug!("HelloWorld method called"); let message = format!("Hello World!"); Ok(CallMethodResult { @@ -128,7 +134,11 @@ impl callbacks::Method for HelloWorld { struct HelloX; impl callbacks::Method for HelloX { - fn call(&mut self, _session: &mut Session, request: &CallMethodRequest) -> Result { + fn call( + &mut self, + _session: &mut 
Session, + request: &CallMethodRequest, + ) -> Result { debug!("HelloX method called"); // Validate input to be a string let mut out1 = Variant::Empty; @@ -150,7 +160,11 @@ impl callbacks::Method for HelloX { return Err(StatusCode::BadArgumentsMissing); }; - let status_code = if in1_status.is_good() { StatusCode::Good } else { StatusCode::BadInvalidArgument }; + let status_code = if in1_status.is_good() { + StatusCode::Good + } else { + StatusCode::BadInvalidArgument + }; Ok(CallMethodResult { status_code, @@ -159,4 +173,4 @@ impl callbacks::Method for HelloX { output_arguments: Some(vec![out1]), }) } -} \ No newline at end of file +} diff --git a/samples/demo-server/src/scalar.rs b/samples/demo-server/src/scalar.rs index 8489b37e6..771806e85 100644 --- a/samples/demo-server/src/scalar.rs +++ b/samples/demo-server/src/scalar.rs @@ -5,9 +5,7 @@ use rand::distributions::Alphanumeric; use rand::Rng; -use opcua_server::{ - prelude::*, -}; +use opcua_server::prelude::*; pub fn add_scalar_variables(server: &mut Server, ns: u16) { let (static_folder_id, dynamic_folder_id) = { @@ -19,7 +17,7 @@ pub fn add_scalar_variables(server: &mut Server, ns: u16) { .unwrap(), address_space .add_folder("Dynamic", "Dynamic", &NodeId::objects_folder_id()) - .unwrap() + .unwrap(), ) }; @@ -34,14 +32,25 @@ pub fn add_scalar_variables(server: &mut Server, ns: u16) { } const SCALAR_TYPES: [DataTypeId; 14] = [ - DataTypeId::Boolean, DataTypeId::Byte, DataTypeId::SByte, DataTypeId::Int16, DataTypeId::UInt16, - DataTypeId::Int32, DataTypeId::UInt32, DataTypeId::Int64, DataTypeId::UInt64, DataTypeId::Float, - DataTypeId::Double, DataTypeId::String, DataTypeId::DateTime, DataTypeId::Guid, -// DataTypeId::ByteString, DataTypeId::Duration, DataTypeId::Integer, DataTypeId::LocaleId, -// DataTypeId::LocalizedText, DataTypeId::NodeId, DataTypeId::Number, DataTypeId::QualifiedName, -// DataTypeId::Time, DataTypeId::UInteger, DataTypeId::UtcTime, DataTypeId::XmlElement, -// DataTypeId::Variant, DataTypeId::Decimal, DataTypeId::ImageBMP, -// DataTypeId::ImageGIF, DataTypeId::ImageJPG, DataTypeId::ImagePNG, + DataTypeId::Boolean, + DataTypeId::Byte, + DataTypeId::SByte, + DataTypeId::Int16, + DataTypeId::UInt16, + DataTypeId::Int32, + DataTypeId::UInt32, + DataTypeId::Int64, + DataTypeId::UInt64, + DataTypeId::Float, + DataTypeId::Double, + DataTypeId::String, + DataTypeId::DateTime, + DataTypeId::Guid, + // DataTypeId::ByteString, DataTypeId::Duration, DataTypeId::Integer, DataTypeId::LocaleId, + // DataTypeId::LocalizedText, DataTypeId::NodeId, DataTypeId::Number, DataTypeId::QualifiedName, + // DataTypeId::Time, DataTypeId::UInteger, DataTypeId::UtcTime, DataTypeId::XmlElement, + // DataTypeId::Variant, DataTypeId::Decimal, DataTypeId::ImageBMP, + // DataTypeId::ImageGIF, DataTypeId::ImageJPG, DataTypeId::ImagePNG, ]; pub fn scalar_node_id(ns: u16, id: DataTypeId, is_dynamic: bool, is_array: bool) -> NodeId { @@ -85,12 +94,12 @@ pub fn scalar_name(id: DataTypeId) -> &'static str { DataTypeId::UtcTime => "UtcTime", DataTypeId::XmlElement => "XmlElement", DataTypeId::Decimal => "Decimal", - DataTypeId::ImageBMP=> "ImageBMP", - DataTypeId::ImageGIF=> "ImageGIF", - DataTypeId::ImageJPG=> "ImageJPG", - DataTypeId::ImagePNG=> "ImagePNG", + DataTypeId::ImageBMP => "ImageBMP", + DataTypeId::ImageGIF => "ImageGIF", + DataTypeId::ImageJPG => "ImageJPG", + DataTypeId::ImagePNG => "ImagePNG", - _ => panic!() + _ => panic!(), } } @@ -120,12 +129,12 @@ pub fn scalar_default_value(id: DataTypeId) -> Variant { DataTypeId::QualifiedName => 
QualifiedName::null().into(), DataTypeId::UtcTime => DateTime::epoch().into(), DataTypeId::XmlElement => Variant::XmlElement(XmlElement::default()), - DataTypeId::ImageBMP=> ByteString::default().into(), - DataTypeId::ImageGIF=> ByteString::default().into(), - DataTypeId::ImageJPG=> ByteString::default().into(), - DataTypeId::ImagePNG=> ByteString::default().into(), + DataTypeId::ImageBMP => ByteString::default().into(), + DataTypeId::ImageGIF => ByteString::default().into(), + DataTypeId::ImageJPG => ByteString::default().into(), + DataTypeId::ImagePNG => ByteString::default().into(), - _ => panic!() + _ => panic!(), } } @@ -145,12 +154,16 @@ pub fn scalar_random_value(id: DataTypeId) -> Variant { DataTypeId::Float => rng.gen::().into(), DataTypeId::Double => rng.gen::().into(), DataTypeId::String => { - let s = (0..10).map(|_| rng.sample(Alphanumeric)).collect::(); + let s = (0..10) + .map(|_| rng.sample(Alphanumeric)) + .collect::(); UAString::from(s).into() } - DataTypeId::DateTime => DateTime::from(rng.gen_range::(0, DateTime::endtimes_ticks())).into(), + DataTypeId::DateTime => { + DateTime::from(rng.gen_range::(0, DateTime::endtimes_ticks())).into() + } DataTypeId::Guid => Guid::new().into(), - _ => scalar_default_value(id) + _ => scalar_default_value(id), } } @@ -190,7 +203,9 @@ fn add_static_array_variables(server: &mut Server, ns: u16, static_folder_id: &N SCALAR_TYPES.iter().for_each(|sn| { let node_id = scalar_node_id(ns, *sn, false, true); let name = scalar_name(*sn); - let values = (0..100).map(|_| scalar_default_value(*sn)).collect::>(); + let values = (0..100) + .map(|_| scalar_default_value(*sn)) + .collect::>(); VariableBuilder::new(&node_id, name, name) .data_type(*sn) .value_rank(1) @@ -235,7 +250,9 @@ fn add_dynamic_array_variables(server: &mut Server, ns: u16, dynamic_folder_id: SCALAR_TYPES.iter().for_each(|sn| { let node_id = scalar_node_id(ns, *sn, true, true); let name = scalar_name(*sn); - let values = (0..10).map(|_| scalar_default_value(*sn)).collect::>(); + let values = (0..10) + .map(|_| scalar_default_value(*sn)) + .collect::>(); VariableBuilder::new(&node_id, name, name) .data_type(*sn) .value_rank(1) @@ -255,17 +272,26 @@ fn set_dynamic_timers(server: &mut Server, ns: u16) { let now = DateTime::now(); SCALAR_TYPES.iter().for_each(|sn| { let node_id = scalar_node_id(ns, *sn, true, false); - let _ = address_space.set_variable_value_by_ref(&node_id, scalar_random_value(*sn), &now, &now); + let _ = address_space.set_variable_value_by_ref( + &node_id, + scalar_random_value(*sn), + &now, + &now, + ); let node_id = scalar_node_id(ns, *sn, true, true); - let values = (0..10).map(|_| scalar_random_value(*sn)).collect::>(); + let values = (0..10) + .map(|_| scalar_random_value(*sn)) + .collect::>(); let _ = address_space.set_variable_value_by_ref(&node_id, values, &now, &now); }); }); } pub fn add_stress_variables(server: &mut Server, ns: u16) { - let node_ids = (0..1000).map(|i| NodeId::new(ns, format!("v{:04}", i))).collect::>(); + let node_ids = (0..1000) + .map(|i| NodeId::new(ns, format!("v{:04}", i))) + .collect::>(); let address_space = server.address_space(); let mut address_space = address_space.write().unwrap(); diff --git a/samples/discovery-client/src/main.rs b/samples/discovery-client/src/main.rs index 80a2d4351..56c97f79c 100644 --- a/samples/discovery-client/src/main.rs +++ b/samples/discovery-client/src/main.rs @@ -17,23 +17,27 @@ impl Args { let mut args = pico_args::Arguments::from_env(); Ok(Args { help: args.contains(["-h", "--help"]), - url: 
args.opt_value_from_str("--url")?.unwrap_or(String::from(DEFAULT_DISCOVERY_URL)), + url: args + .opt_value_from_str("--url")? + .unwrap_or(String::from(DEFAULT_DISCOVERY_URL)), }) } pub fn usage() { - println!(r#"OPC UA Discovery client + println!( + r#"OPC UA Discovery client Usage: -h, --help Show help - --url The url for the discovery server (default: {})"#, DEFAULT_DISCOVERY_URL); + --url The url for the discovery server (default: {})"#, + DEFAULT_DISCOVERY_URL + ); } } const DEFAULT_DISCOVERY_URL: &str = "opc.tcp://localhost:4840/"; fn main() -> Result<(), ()> { - let args = Args::parse_args() - .map_err(|_| Args::usage())?; + let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); } else { @@ -53,14 +57,19 @@ fn main() -> Result<(), ()> { // Each server is an `ApplicationDescription` println!("Server : {}", server.application_name); if let Some(ref discovery_urls) = server.discovery_urls { - discovery_urls.iter().for_each(|discovery_url| print_server_endpoints(discovery_url.as_ref())); + discovery_urls.iter().for_each(|discovery_url| { + print_server_endpoints(discovery_url.as_ref()) + }); } else { println!(" No discovery urls for this server"); } }); } Err(err) => { - println!("ERROR: Cannot find servers on discovery server - check this error - {:?}", err); + println!( + "ERROR: Cannot find servers on discovery server - check this error - {:?}", + err + ); } } } @@ -79,12 +88,20 @@ fn print_server_endpoints(discovery_url: &str) { Result::Ok(endpoints) => { println!(" Server has these endpoints:"); endpoints.iter().for_each(|e| { - println!(" {} - {:?} / {:?}", e.endpoint_url, SecurityPolicy::from_str(e.security_policy_uri.as_ref()).unwrap(), e.security_mode); + println!( + " {} - {:?} / {:?}", + e.endpoint_url, + SecurityPolicy::from_str(e.security_policy_uri.as_ref()).unwrap(), + e.security_mode + ); }); } Result::Err(status_code) => { - println!(" ERROR: Cannot get endpoints for this server url, error - {}", status_code); + println!( + " ERROR: Cannot get endpoints for this server url, error - {}", + status_code + ); } } } -} \ No newline at end of file +} diff --git a/samples/modbus-server/src/config.rs b/samples/modbus-server/src/config.rs index 70d2f77d7..a664aacc6 100644 --- a/samples/modbus-server/src/config.rs +++ b/samples/modbus-server/src/config.rs @@ -2,11 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use std::{ - fs::File, - io::Read, - path::Path, -}; +use std::{fs::File, io::Read, path::Path}; use opcua_server::prelude::*; @@ -68,13 +64,18 @@ impl AliasType { /// Returns the size of the type in number of registers pub fn size_in_words(&self) -> u16 { match self { - Self::Default | Self::Boolean | Self::Byte | Self::SByte | Self::UInt16 | Self::Int16 => 1, + Self::Default + | Self::Boolean + | Self::Byte + | Self::SByte + | Self::UInt16 + | Self::Int16 => 1, Self::UInt32 => 2, Self::Int32 => 2, Self::UInt64 => 4, Self::Int64 => 4, Self::Float => 2, - Self::Double => 4 + Self::Double => 4, } } } @@ -97,7 +98,6 @@ pub struct Alias { pub writable: bool, } - #[derive(Deserialize, Clone, Copy, PartialEq)] pub enum AccessMode { ReadOnly, @@ -146,7 +146,7 @@ impl TableConfig { true } } - _ => true + _ => true, }; range_valid && access_valid @@ -157,11 +157,15 @@ impl TableConfig { } pub fn writable(&self) -> bool { - self.count > 0 && (self.access_mode == AccessMode::WriteOnly || self.access_mode == AccessMode::ReadWrite) + self.count > 0 + && (self.access_mode == AccessMode::WriteOnly + || self.access_mode 
== AccessMode::ReadWrite) } pub fn readable(&self) -> bool { - self.count > 0 && (self.access_mode == AccessMode::ReadOnly || self.access_mode == AccessMode::ReadWrite) + self.count > 0 + && (self.access_mode == AccessMode::ReadOnly + || self.access_mode == AccessMode::ReadWrite) } } @@ -197,11 +201,17 @@ impl Config { } Ok(config) } else { - println!("Cannot deserialize configuration from {}", path.to_string_lossy()); + println!( + "Cannot deserialize configuration from {}", + path.to_string_lossy() + ); Err(()) } } else { - println!("Cannot read configuration file {} to string", path.to_string_lossy()); + println!( + "Cannot read configuration file {} to string", + path.to_string_lossy() + ); Err(()) } } else { @@ -233,7 +243,8 @@ impl Config { valid = false; } if let Some(ref aliases) = self.aliases { - let set: std::collections::HashSet<&str> = aliases.iter().map(|a| a.name.as_ref()).collect::<_>(); + let set: std::collections::HashSet<&str> = + aliases.iter().map(|a| a.name.as_ref()).collect::<_>(); if set.len() != aliases.len() { println!("Aliases contains duplicate names"); valid = false; diff --git a/samples/modbus-server/src/main.rs b/samples/modbus-server/src/main.rs index d9e77955a..35f239c02 100644 --- a/samples/modbus-server/src/main.rs +++ b/samples/modbus-server/src/main.rs @@ -17,8 +17,8 @@ use std::{ }; mod config; -mod opcua; mod master; +mod opcua; mod slave; #[derive(Clone, Copy, PartialEq)] @@ -75,16 +75,21 @@ impl Args { Ok(Args { help: args.contains(["-h", "--help"]), run_demo_slave: args.contains("--run-demo-slave"), - config: args.opt_value_from_str("--config")?.unwrap_or(String::from(DEFAULT_CONFIG)), + config: args + .opt_value_from_str("--config")? + .unwrap_or(String::from(DEFAULT_CONFIG)), }) } pub fn usage() { - println!(r#"MODBUS server + println!( + r#"MODBUS server Usage: -h, --help Show help --config Configuration file (default: {}) - --run-demo-slave Runs a demo slave to ensure the sample has something to connect to"#, DEFAULT_CONFIG); + --run-demo-slave Runs a demo slave to ensure the sample has something to connect to"#, + DEFAULT_CONFIG + ); } } @@ -92,8 +97,7 @@ const DEFAULT_CONFIG: &str = "./modbus.conf"; fn main() -> Result<(), ()> { // Read command line arguments - let args = Args::parse_args() - .map_err(|_| Args::usage())?; + let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); } else { @@ -144,4 +148,3 @@ fn run(config: config::Config, run_demo_slave: bool) { let modbus = master::MODBUS::run(runtime.clone()); opcua::run(runtime, modbus); } - diff --git a/samples/modbus-server/src/master.rs b/samples/modbus-server/src/master.rs index e23cbc270..345063e5c 100644 --- a/samples/modbus-server/src/master.rs +++ b/samples/modbus-server/src/master.rs @@ -8,13 +8,10 @@ use std::{ time::{Duration, Instant}, }; -use futures::{Future, sink::Sink, stream::Stream}; +use futures::{sink::Sink, stream::Stream, Future}; use tokio::sync as tsync; use tokio_core::reactor::Core; -use tokio_modbus::{ - client, - prelude::*, -}; +use tokio_modbus::{client, prelude::*}; use tokio_timer::Interval; use crate::Runtime; @@ -111,26 +108,38 @@ fn store_values_in_registers(values: Vec, registers: Arc>>) struct InputCoil; impl InputCoil { - pub fn async_read(handle: &tokio_core::reactor::Handle, ctx: &client::Context, runtime: &Arc>) { + pub fn async_read( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + runtime: &Arc>, + ) { let (coils, address, count) = InputCoil::begin_read_input_coils(runtime); let runtime = 
runtime.clone(); let runtime_for_err = runtime.clone(); - handle.spawn(ctx.read_discrete_inputs(address, count as u16) - .map_err(move |err| { - println!("Read input coils error {:?}", err); - InputCoil::end_read_input_coils(&runtime_for_err); - }) - .and_then(move |values| { - store_values_in_coils(values, coils.clone()); - InputCoil::end_read_input_coils(&runtime); - Ok(()) - })); + handle.spawn( + ctx.read_discrete_inputs(address, count as u16) + .map_err(move |err| { + println!("Read input coils error {:?}", err); + InputCoil::end_read_input_coils(&runtime_for_err); + }) + .and_then(move |values| { + store_values_in_coils(values, coils.clone()); + InputCoil::end_read_input_coils(&runtime); + Ok(()) + }), + ); } - fn begin_read_input_coils(runtime: &Arc>) -> (Arc>>, u16, u16) { + fn begin_read_input_coils( + runtime: &Arc>, + ) -> (Arc>>, u16, u16) { let mut runtime = runtime.write().unwrap(); runtime.reading_input_coils = true; - (runtime.input_coils.clone(), runtime.config.input_coils.base_address, runtime.config.input_coils.count) + ( + runtime.input_coils.clone(), + runtime.config.input_coils.base_address, + runtime.config.input_coils.count, + ) } fn end_read_input_coils(runtime: &Arc>) { @@ -142,30 +151,47 @@ impl InputCoil { struct OutputCoil; impl OutputCoil { - pub fn async_read(handle: &tokio_core::reactor::Handle, ctx: &client::Context, runtime: &Arc>) { + pub fn async_read( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + runtime: &Arc>, + ) { let (coils, address, count) = OutputCoil::begin_read_output_coils(runtime); let runtime = runtime.clone(); let runtime_for_err = runtime.clone(); - handle.spawn(ctx.read_coils(address, count as u16) - .map_err(move |err| { - println!("Read output coils error {:?}", err); - OutputCoil::end_read_output_coils(&runtime_for_err); - }) - .and_then(move |values| { - store_values_in_coils(values, coils.clone()); - OutputCoil::end_read_output_coils(&runtime); - Ok(()) - })); + handle.spawn( + ctx.read_coils(address, count as u16) + .map_err(move |err| { + println!("Read output coils error {:?}", err); + OutputCoil::end_read_output_coils(&runtime_for_err); + }) + .and_then(move |values| { + store_values_in_coils(values, coils.clone()); + OutputCoil::end_read_output_coils(&runtime); + Ok(()) + }), + ); } - pub fn async_write(handle: &tokio_core::reactor::Handle, ctx: &client::Context, addr: u16, value: bool) { + pub fn async_write( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + addr: u16, + value: bool, + ) { handle.spawn(ctx.write_single_coil(addr, value).map_err(move |_err| ())); } - fn begin_read_output_coils(runtime: &Arc>) -> (Arc>>, u16, u16) { + fn begin_read_output_coils( + runtime: &Arc>, + ) -> (Arc>>, u16, u16) { let mut runtime = runtime.write().unwrap(); runtime.reading_output_coils = true; - (runtime.output_coils.clone(), runtime.config.output_coils.base_address, runtime.config.output_coils.count) + ( + runtime.output_coils.clone(), + runtime.config.output_coils.base_address, + runtime.config.output_coils.count, + ) } fn end_read_output_coils(runtime: &Arc>) { @@ -177,26 +203,38 @@ impl OutputCoil { struct InputRegister; impl InputRegister { - pub fn async_read(handle: &tokio_core::reactor::Handle, ctx: &client::Context, runtime: &Arc>) { + pub fn async_read( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + runtime: &Arc>, + ) { let (registers, address, count) = InputRegister::begin_read_input_registers(runtime); let runtime = runtime.clone(); let runtime_for_err = 
runtime.clone(); - handle.spawn(ctx.read_input_registers(address, count as u16) - .map_err(move |err| { - println!("Read input registers error {:?}", err); - InputRegister::end_read_input_registers(&runtime_for_err); - }) - .and_then(move |values| { - store_values_in_registers(values, registers.clone()); - InputRegister::end_read_input_registers(&runtime); - Ok(()) - })); + handle.spawn( + ctx.read_input_registers(address, count as u16) + .map_err(move |err| { + println!("Read input registers error {:?}", err); + InputRegister::end_read_input_registers(&runtime_for_err); + }) + .and_then(move |values| { + store_values_in_registers(values, registers.clone()); + InputRegister::end_read_input_registers(&runtime); + Ok(()) + }), + ); } - fn begin_read_input_registers(runtime: &Arc>) -> (Arc>>, u16, u16) { + fn begin_read_input_registers( + runtime: &Arc>, + ) -> (Arc>>, u16, u16) { let mut runtime = runtime.write().unwrap(); runtime.reading_input_registers = true; - (runtime.input_registers.clone(), runtime.config.input_registers.base_address, runtime.config.input_registers.count) + ( + runtime.input_registers.clone(), + runtime.config.input_registers.base_address, + runtime.config.input_registers.count, + ) } fn end_read_input_registers(runtime: &Arc>) { @@ -208,34 +246,56 @@ impl InputRegister { struct OutputRegister; impl OutputRegister { - pub fn async_read(handle: &tokio_core::reactor::Handle, ctx: &client::Context, runtime: &Arc>) { + pub fn async_read( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + runtime: &Arc>, + ) { let (registers, address, count) = OutputRegister::begin_read_output_registers(runtime); let runtime = runtime.clone(); let runtime_for_err = runtime.clone(); - handle.spawn(ctx.read_holding_registers(address, count as u16) - .map_err(move |err| { - println!("Read input registers error {:?}", err); - OutputRegister::end_read_output_registers(&runtime_for_err); - }) - .and_then(move |values| { - store_values_in_registers(values, registers.clone()); - OutputRegister::end_read_output_registers(&runtime); - Ok(()) - })); + handle.spawn( + ctx.read_holding_registers(address, count as u16) + .map_err(move |err| { + println!("Read input registers error {:?}", err); + OutputRegister::end_read_output_registers(&runtime_for_err); + }) + .and_then(move |values| { + store_values_in_registers(values, registers.clone()); + OutputRegister::end_read_output_registers(&runtime); + Ok(()) + }), + ); } - pub fn async_write_register(handle: &tokio_core::reactor::Handle, ctx: &client::Context, addr: u16, value: u16) { + pub fn async_write_register( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + addr: u16, + value: u16, + ) { handle.spawn(ctx.write_single_register(addr, value).map_err(|_| ())); } - pub fn async_write_registers(handle: &tokio_core::reactor::Handle, ctx: &client::Context, addr: u16, values: &[u16]) { + pub fn async_write_registers( + handle: &tokio_core::reactor::Handle, + ctx: &client::Context, + addr: u16, + values: &[u16], + ) { handle.spawn(ctx.write_multiple_registers(addr, values).map_err(|_| ())); } - fn begin_read_output_registers(runtime: &Arc>) -> (Arc>>, u16, u16) { + fn begin_read_output_registers( + runtime: &Arc>, + ) -> (Arc>>, u16, u16) { let mut runtime = runtime.write().unwrap(); runtime.reading_input_registers = true; - (runtime.output_registers.clone(), runtime.config.output_registers.base_address, runtime.config.output_registers.count) + ( + runtime.output_registers.clone(), + 
runtime.config.output_registers.base_address, + runtime.config.output_registers.count, + ) } fn end_read_output_registers(runtime: &Arc>) { @@ -251,19 +311,33 @@ enum Message { WriteRegisters(u16, Vec), } -fn spawn_receiver(handle: &tokio_core::reactor::Handle, rx: tsync::mpsc::UnboundedReceiver, ctx: client::Context, runtime: Arc>) { +fn spawn_receiver( + handle: &tokio_core::reactor::Handle, + rx: tsync::mpsc::UnboundedReceiver, + ctx: client::Context, + runtime: Arc>, +) { let handle_for_action = handle.clone(); let task = rx .for_each(move |msg| { match msg { Message::UpdateValues => { // Test if the previous action is finished. - let (read_input_registers, read_output_registers, read_input_coils, read_output_coils) = { + let ( + read_input_registers, + read_output_registers, + read_input_coils, + read_output_coils, + ) = { let runtime = runtime.read().unwrap(); - (!runtime.reading_input_registers && runtime.config.input_registers.readable(), - !runtime.reading_output_registers && runtime.config.output_registers.readable(), - !runtime.reading_input_coils && runtime.config.input_coils.readable(), - !runtime.reading_output_coils && runtime.config.output_coils.readable()) + ( + !runtime.reading_input_registers + && runtime.config.input_registers.readable(), + !runtime.reading_output_registers + && runtime.config.output_registers.readable(), + !runtime.reading_input_coils && runtime.config.input_coils.readable(), + !runtime.reading_output_coils && runtime.config.output_coils.readable(), + ) }; if read_input_registers { InputRegister::async_read(&handle_for_action, &ctx, &runtime); @@ -293,17 +367,27 @@ fn spawn_receiver(handle: &tokio_core::reactor::Handle, rx: tsync::mpsc::Unbound Message::WriteRegisters(addr, values) => { let runtime = runtime.read().unwrap(); if runtime.config.output_registers.writable() { - OutputRegister::async_write_registers(&handle_for_action, &ctx, addr, &values); + OutputRegister::async_write_registers( + &handle_for_action, + &ctx, + addr, + &values, + ); } } } Ok(()) - }).map_err(|_| ()); + }) + .map_err(|_| ()); handle.spawn(task); } /// Returns a read timer future which periodically polls the MODBUS slave for some values -fn spawn_timer(handle: &tokio_core::reactor::Handle, tx: tsync::mpsc::UnboundedSender, runtime: Arc>) -> impl Future { +fn spawn_timer( + handle: &tokio_core::reactor::Handle, + tx: tsync::mpsc::UnboundedSender, + runtime: Arc>, +) -> impl Future { let interval = { let runtime = runtime.read().unwrap(); Duration::from_millis(runtime.config.read_interval as u64) @@ -313,13 +397,9 @@ fn spawn_timer(handle: &tokio_core::reactor::Handle, tx: tsync::mpsc::UnboundedS .map_err(|err| { println!("Timer error {:?}", err); }) - .map(move |x| { - (x, handle.clone(), tx.clone()) - }) + .map(move |x| (x, handle.clone(), tx.clone())) .for_each(|(_instant, handle, tx)| { - handle.spawn(tx.send(Message::UpdateValues) - .map(|_| ()) - .map_err(|_| ())); + handle.spawn(tx.send(Message::UpdateValues).map(|_| ()).map_err(|_| ())); Ok(()) }) -} \ No newline at end of file +} diff --git a/samples/modbus-server/src/opcua.rs b/samples/modbus-server/src/opcua.rs index 23b58d29b..133d2eb11 100644 --- a/samples/modbus-server/src/opcua.rs +++ b/samples/modbus-server/src/opcua.rs @@ -3,7 +3,8 @@ // Copyright (C) 2017-2020 Adam Lock use std::{ - f32, f64, i16, i32, i64, i8, path::PathBuf, + f32, f64, i16, i32, i64, i8, + path::PathBuf, sync::{Arc, Mutex, RwLock}, u16, }; @@ -11,15 +12,15 @@ use std::{ use opcua_server::prelude::*; use crate::{ - config::{Alias, AliasType, 
TableConfig}, master::MODBUS, + config::{Alias, AliasType, TableConfig}, + master::MODBUS, Runtime, Table, }; // Runs the OPC UA server which is just a basic server with some variables hooked up to getters pub fn run(runtime: Arc>, modbus: MODBUS) { let config = ServerConfig::load(&PathBuf::from("../server.conf")).unwrap(); - let server = ServerBuilder::from_config(config) - .server().unwrap(); + let server = ServerBuilder::from_config(config).server().unwrap(); let address_space = server.address_space(); @@ -39,7 +40,7 @@ fn register_number(table: Table, address: u16) -> u32 { Table::OutputCoils => 1, Table::InputCoils => 10001, Table::InputRegisters => 30001, - Table::OutputRegisters => 40001 + Table::OutputRegisters => 40001, }; base + address as u32 } @@ -50,7 +51,12 @@ fn make_node_id(nsidx: u16, table: Table, address: u16) -> NodeId { } /// Adds all the MODBUS variables to the address space -fn add_variables(runtime: Arc>, modbus: Arc>, address_space: &mut AddressSpace, nsidx: u16) { +fn add_variables( + runtime: Arc>, + modbus: Arc>, + address_space: &mut AddressSpace, + nsidx: u16, +) { // Create a folder under objects folder let modbus_folder_id = address_space .add_folder("MODBUS", "MODBUS", &NodeId::objects_folder_id()) @@ -71,7 +77,13 @@ fn start_end(table_config: &TableConfig) -> (usize, usize) { (start, end) } -fn add_input_coils(runtime: &Arc>, modbus: &Arc>, address_space: &mut AddressSpace, nsidx: u16, parent_folder_id: &NodeId) { +fn add_input_coils( + runtime: &Arc>, + modbus: &Arc>, + address_space: &mut AddressSpace, + nsidx: u16, + parent_folder_id: &NodeId, +) { let folder_id = address_space .add_folder("Input Coils", "Input Coils", parent_folder_id) .unwrap(); @@ -83,10 +95,27 @@ fn add_input_coils(runtime: &Arc>, modbus: &Arc>, (start, end, values) }; - make_variables(modbus, address_space, nsidx, Table::InputCoils, start, end, &folder_id, values, false, |i| format!("Input Coil {}", i)); + make_variables( + modbus, + address_space, + nsidx, + Table::InputCoils, + start, + end, + &folder_id, + values, + false, + |i| format!("Input Coil {}", i), + ); } -fn add_output_coils(runtime: &Arc>, modbus: &Arc>, address_space: &mut AddressSpace, nsidx: u16, parent_folder_id: &NodeId) { +fn add_output_coils( + runtime: &Arc>, + modbus: &Arc>, + address_space: &mut AddressSpace, + nsidx: u16, + parent_folder_id: &NodeId, +) { let folder_id = address_space .add_folder("Output Coils", "Output Coils", parent_folder_id) .unwrap(); @@ -98,10 +127,27 @@ fn add_output_coils(runtime: &Arc>, modbus: &Arc>, (start, end, values) }; - make_variables(modbus, address_space, nsidx, Table::OutputCoils, start, end, &folder_id, values, false, |i| format!("Output Coil {}", i)); + make_variables( + modbus, + address_space, + nsidx, + Table::OutputCoils, + start, + end, + &folder_id, + values, + false, + |i| format!("Output Coil {}", i), + ); } -fn add_input_registers(runtime: &Arc>, modbus: &Arc>, address_space: &mut AddressSpace, nsidx: u16, parent_folder_id: &NodeId) { +fn add_input_registers( + runtime: &Arc>, + modbus: &Arc>, + address_space: &mut AddressSpace, + nsidx: u16, + parent_folder_id: &NodeId, +) { let folder_id = address_space .add_folder("Input Registers", "Input Registers", parent_folder_id) .unwrap(); @@ -112,10 +158,27 @@ fn add_input_registers(runtime: &Arc>, modbus: &Arc>, modbus: &Arc>, address_space: &mut AddressSpace, nsidx: u16, parent_folder_id: &NodeId) { +fn add_output_registers( + runtime: &Arc>, + modbus: &Arc>, + address_space: &mut AddressSpace, + nsidx: u16, + 
parent_folder_id: &NodeId, +) { let folder_id = address_space .add_folder("Output Registers", "Output Registers", parent_folder_id) .unwrap(); @@ -126,10 +189,27 @@ fn add_output_registers(runtime: &Arc>, modbus: &Arc>, modbus: &Arc>, address_space: &mut AddressSpace, nsidx: u16, parent_folder_id: &NodeId) { +fn add_aliases( + runtime: &Arc>, + modbus: &Arc>, + address_space: &mut AddressSpace, + nsidx: u16, + parent_folder_id: &NodeId, +) { let aliases = { let runtime = runtime.read().unwrap(); runtime.config.aliases.clone() @@ -142,7 +222,11 @@ fn add_aliases(runtime: &Arc>, modbus: &Arc>, addr // Create variables for all of the aliases aliases.into_iter().for_each(move |alias| { // Create a getter/setter - let getter_setter = Arc::new(Mutex::new(AliasGetterSetter::new(runtime.clone(), modbus.clone(), alias.clone()))); + let getter_setter = Arc::new(Mutex::new(AliasGetterSetter::new( + runtime.clone(), + modbus.clone(), + alias.clone(), + ))); // Create a variable for the alias let node_id = NodeId::new(nsidx, alias.name.clone()); let data_type: DataTypeId = alias.data_type.into(); @@ -153,8 +237,7 @@ fn add_aliases(runtime: &Arc>, modbus: &Arc>, addr .value_getter(getter_setter.clone()); let v = if alias.writable { - v.value_setter(getter_setter) - .writable() + v.value_setter(getter_setter).writable() } else { v }; @@ -165,8 +248,19 @@ fn add_aliases(runtime: &Arc>, modbus: &Arc>, addr } /// Creates variables and hooks them up to getters -fn make_variables(modbus: &Arc>, address_space: &mut AddressSpace, nsidx: u16, table: Table, start: usize, end: usize, parent_folder_id: &NodeId, values: Arc>>, default_value: T, name_formatter: impl Fn(usize) -> String) - where T: 'static + Copy + Send + Sync + Into +fn make_variables( + modbus: &Arc>, + address_space: &mut AddressSpace, + nsidx: u16, + table: Table, + start: usize, + end: usize, + parent_folder_id: &NodeId, + values: Arc>>, + default_value: T, + name_formatter: impl Fn(usize) -> String, +) where + T: 'static + Copy + Send + Sync + Into, { // Create variables (start..end).for_each(|i| { @@ -176,56 +270,66 @@ fn make_variables(modbus: &Arc>, address_space: &mut AddressSpa let v = VariableBuilder::new(&make_node_id(nsidx, table, addr), &name, &name) .organized_by(parent_folder_id) .value(default_value) - .value_getter(AttrFnGetter::new_boxed(move |_node_id, _timestamps_to_return, _attribute_id, _numeric_range, _name, _f| -> Result, StatusCode> { - let values = values.read().unwrap(); - let value = *values.get(i - start).unwrap(); - Ok(Some(DataValue::new_now(value))) - })); + .value_getter(AttrFnGetter::new_boxed( + move |_node_id, + _timestamps_to_return, + _attribute_id, + _numeric_range, + _name, + _f| + -> Result, StatusCode> { + let values = values.read().unwrap(); + let value = *values.get(i - start).unwrap(); + Ok(Some(DataValue::new_now(value))) + }, + )); // Output tables have setters too let v = match table { - Table::InputCoils => { - v.data_type(DataTypeId::Boolean) - } + Table::InputCoils => v.data_type(DataTypeId::Boolean), Table::OutputCoils => { let modbus = modbus.clone(); v.data_type(DataTypeId::Boolean) - .value_setter(AttrFnSetter::new_boxed(move |_node_id, _attribute_id, _index_range, value| { - // Try to cast to a bool - let value = if let Some(value) = value.value { - value.cast(VariantTypeId::Boolean) - } else { - Variant::Empty - }; - if let Variant::Boolean(value) = value { - let modbus = modbus.lock().unwrap(); - modbus.write_to_coil(addr, value); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - } - 
})).writable() - } - Table::InputRegisters => { - v.data_type(DataTypeId::UInt16) + .value_setter(AttrFnSetter::new_boxed( + move |_node_id, _attribute_id, _index_range, value| { + // Try to cast to a bool + let value = if let Some(value) = value.value { + value.cast(VariantTypeId::Boolean) + } else { + Variant::Empty + }; + if let Variant::Boolean(value) = value { + let modbus = modbus.lock().unwrap(); + modbus.write_to_coil(addr, value); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + }, + )) + .writable() } + Table::InputRegisters => v.data_type(DataTypeId::UInt16), Table::OutputRegisters => { let modbus = modbus.clone(); v.data_type(DataTypeId::UInt16) - .value_setter(AttrFnSetter::new_boxed(move |_node_id, _attribute_id, _index_range, value| { - let value = if let Some(value) = value.value { - value.cast(VariantTypeId::UInt16) - } else { - Variant::Empty - }; - if let Variant::UInt16(value) = value { - let modbus = modbus.lock().unwrap(); - modbus.write_to_register(addr, value); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - } - })).writable() + .value_setter(AttrFnSetter::new_boxed( + move |_node_id, _attribute_id, _index_range, value| { + let value = if let Some(value) = value.value { + value.cast(VariantTypeId::UInt16) + } else { + Variant::Empty + }; + if let Variant::UInt16(value) = value { + let modbus = modbus.lock().unwrap(); + modbus.write_to_register(addr, value); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + }, + )) + .writable() } }; v.insert(address_space); @@ -239,18 +343,41 @@ pub struct AliasGetterSetter { } impl AttributeGetter for AliasGetterSetter { - fn get(&mut self, _node_id: &NodeId, _timestamps_to_return: TimestampsToReturn, _attribute_id: AttributeId, _index_range: NumericRange, _data_encoding: &QualifiedName, __max_age: f64) -> Result, StatusCode> { - AliasGetterSetter::get_alias_value(self.runtime.clone(), self.alias.data_type, self.alias.number) + fn get( + &mut self, + _node_id: &NodeId, + _timestamps_to_return: TimestampsToReturn, + _attribute_id: AttributeId, + _index_range: NumericRange, + _data_encoding: &QualifiedName, + __max_age: f64, + ) -> Result, StatusCode> { + AliasGetterSetter::get_alias_value( + self.runtime.clone(), + self.alias.data_type, + self.alias.number, + ) } } impl AttributeSetter for AliasGetterSetter { - fn set(&mut self, _node_id: &NodeId, _attribute_id: AttributeId, _index_range: NumericRange, data_value: DataValue) -> Result<(), StatusCode> { + fn set( + &mut self, + _node_id: &NodeId, + _attribute_id: AttributeId, + _index_range: NumericRange, + data_value: DataValue, + ) -> Result<(), StatusCode> { if !self.is_writable() { panic!("Attribute setter should not have been callable") } if let Some(value) = data_value.value { - let _ = AliasGetterSetter::set_alias_value(self.modbus.clone(), self.alias.data_type, self.alias.number, value)?; + let _ = AliasGetterSetter::set_alias_value( + self.modbus.clone(), + self.alias.data_type, + self.alias.number, + value, + )?; Ok(()) } else { Err(StatusCode::BadUnexpectedError) @@ -259,8 +386,16 @@ impl AttributeSetter for AliasGetterSetter { } impl AliasGetterSetter { - pub fn new(runtime: Arc>, modbus: Arc>, alias: Alias) -> AliasGetterSetter { - AliasGetterSetter { runtime, modbus, alias } + pub fn new( + runtime: Arc>, + modbus: Arc>, + alias: Alias, + ) -> AliasGetterSetter { + AliasGetterSetter { + runtime, + modbus, + alias, + } } fn is_writable(&self) -> bool { @@ -272,19 +407,42 @@ impl AliasGetterSetter { self.alias.writable && table_writable } - 
fn get_alias_value(runtime: Arc>, data_type: AliasType, number: u16) -> Result, StatusCode> { + fn get_alias_value( + runtime: Arc>, + data_type: AliasType, + number: u16, + ) -> Result, StatusCode> { let runtime = runtime.read().unwrap(); let (table, address) = Table::table_from_number(number); let value = match table { - Table::OutputCoils => Self::value_from_coil(address, &runtime.config.output_coils, &runtime.output_coils), - Table::InputCoils => Self::value_from_coil(address, &runtime.config.input_coils, &runtime.input_coils), - Table::InputRegisters => Self::value_from_register(address, &runtime.config.input_registers, data_type, &runtime.input_registers), - Table::OutputRegisters => Self::value_from_register(address, &runtime.config.output_registers, data_type, &runtime.output_registers), + Table::OutputCoils => { + Self::value_from_coil(address, &runtime.config.output_coils, &runtime.output_coils) + } + Table::InputCoils => { + Self::value_from_coil(address, &runtime.config.input_coils, &runtime.input_coils) + } + Table::InputRegisters => Self::value_from_register( + address, + &runtime.config.input_registers, + data_type, + &runtime.input_registers, + ), + Table::OutputRegisters => Self::value_from_register( + address, + &runtime.config.output_registers, + data_type, + &runtime.output_registers, + ), }; Ok(Some(DataValue::new_now(value))) } - fn set_alias_value(modbus: Arc>, data_type: AliasType, number: u16, value: Variant) -> Result<(), StatusCode> { + fn set_alias_value( + modbus: Arc>, + data_type: AliasType, + number: u16, + value: Variant, + ) -> Result<(), StatusCode> { let (table, addr) = Table::table_from_number(number); match table { Table::OutputCoils => { @@ -302,28 +460,35 @@ impl AliasGetterSetter { let variant_type: VariantTypeId = data_type.into(); let value = value.cast(variant_type); // Write the words - let (_, words) = Self::value_to_words(value).map_err(|_| StatusCode::BadUnexpectedError)?; + let (_, words) = + Self::value_to_words(value).map_err(|_| StatusCode::BadUnexpectedError)?; let modbus = modbus.lock().unwrap(); modbus.write_to_registers(addr, words); Ok(()) } - _ => panic!("Invalid table") + _ => panic!("Invalid table"), } } - fn value_from_coil(address: u16, table_config: &TableConfig, values: &Arc>>) -> Variant { + fn value_from_coil( + address: u16, + table_config: &TableConfig, + values: &Arc>>, + ) -> Variant { let base_address = table_config.base_address; let cnt = table_config.count; if address < base_address || address >= base_address + cnt { // This should have been caught when validating config file - panic!("Address {} is not in the range of register values polled", address); + panic!( + "Address {} is not in the range of register values polled", + address + ); } let values = values.read().unwrap(); let idx = (address - base_address) as usize; Variant::from(*values.get(idx).unwrap()) } - fn word_2_to_bytes(w: &[u16]) -> [u8; 4] { assert_eq!(w.len(), 2, "Invalid length for 32-bit value"); let w0 = w[0].to_be_bytes(); @@ -346,16 +511,12 @@ impl AliasGetterSetter { let v = if v { 1u16 } else { 0u16 }; Ok((AliasType::Boolean, vec![v])) } - Variant::Byte(v) => { - Ok((AliasType::Byte, vec![v as u16])) - } + Variant::Byte(v) => Ok((AliasType::Byte, vec![v as u16])), Variant::SByte(v) => { let v = v as u16; Ok((AliasType::SByte, vec![v])) } - Variant::UInt16(v) => { - Ok((AliasType::UInt16, vec![v])) - } + Variant::UInt16(v) => Ok((AliasType::UInt16, vec![v])), Variant::Int16(v) => { let v = u16::from_be_bytes(v.to_be_bytes()); 
                Ok((AliasType::Int16, vec![v]))
@@ -404,9 +565,7 @@ impl AliasGetterSetter {
                let v3 = u16::from_be_bytes([b[6], b[7]]);
                Ok((AliasType::Double, vec![v0, v1, v2, v3]))
            }
-            _ => {
-                Err(())
-            }
+            _ => Err(()),
        }
    }

@@ -424,7 +583,13 @@ impl AliasGetterSetter {
            AliasType::SByte => {
                // Transmute bits and then clamp between MIN and MAX
                let v = i16::from_be_bytes(w.to_be_bytes());
-                let v = if v < i8::MIN as i16 { i8::MIN } else if v > i8::MAX as i16 { i8::MAX } else { v as i8 };
+                let v = if v < i8::MIN as i16 {
+                    i8::MIN
+                } else if v > i8::MAX as i16 {
+                    i8::MAX
+                } else {
+                    v as i8
+                };
                Variant::from(v)
            }
            AliasType::UInt16 => {
@@ -435,7 +600,7 @@ impl AliasGetterSetter {
                // Transmute bits
                Variant::from(i16::from_be_bytes(w.to_be_bytes()))
            }
-            _ => panic!()
+            _ => panic!(),
        }
    }
    fn words_to_value(data_type: AliasType, w: &[u16]) -> Variant {
@@ -482,17 +647,28 @@ impl AliasGetterSetter {
                let v = f64::from_bits(bits);
                Variant::from(v)
            }
-            _ => panic!()
+            _ => panic!(),
        }
    }

-    fn value_from_register(address: u16, table_config: &TableConfig, data_type: AliasType, values: &Arc<RwLock<Vec<u16>>>) -> Variant {
+    fn value_from_register(
+        address: u16,
+        table_config: &TableConfig,
+        data_type: AliasType,
+        values: &Arc<RwLock<Vec<u16>>>,
+    ) -> Variant {
        let size = data_type.size_in_words();
        let base_address = table_config.base_address;
        let cnt = table_config.count;
-        if address < base_address || address >= (base_address + cnt) || (address + size) >= (base_address + cnt) {
+        if address < base_address
+            || address >= (base_address + cnt)
+            || (address + size) >= (base_address + cnt)
+        {
            // This should have been caught when validating config file
-            panic!("Address {} is not in the range of register values polled", address);
+            panic!(
+                "Address {} is not in the range of register values polled",
+                address
+            );
        }

        let idx = (address - base_address) as usize;
@@ -503,9 +679,13 @@ impl AliasGetterSetter {
            Self::word_to_value(data_type, w)
        } else {
            match data_type {
-                AliasType::UInt32 | AliasType::Int32 | AliasType::Float => Self::words_to_value(data_type, &values[idx..=idx + 1]),
-                AliasType::UInt64 | AliasType::Int64 | AliasType::Double => Self::words_to_value(data_type, &values[idx..=idx + 3]),
-                _ => panic!()
+                AliasType::UInt32 | AliasType::Int32 | AliasType::Float => {
+                    Self::words_to_value(data_type, &values[idx..=idx + 1])
+                }
+                AliasType::UInt64 | AliasType::Int64 | AliasType::Double => {
+                    Self::words_to_value(data_type, &values[idx..=idx + 3])
+                }
+                _ => panic!(),
            }
        }
    }
@@ -513,42 +693,95 @@ impl AliasGetterSetter {

#[test]
fn values_1_word() {
-    assert_eq!(AliasGetterSetter::word_to_value(AliasType::Boolean, 0u16), Variant::Boolean(false));
-    assert_eq!(AliasGetterSetter::word_to_value(AliasType::Boolean, 1u16), Variant::Boolean(true));
-    assert_eq!(AliasGetterSetter::word_to_value(AliasType::Boolean, 3u16), Variant::Boolean(true));
+    assert_eq!(
+        AliasGetterSetter::word_to_value(AliasType::Boolean, 0u16),
+        Variant::Boolean(false)
+    );
+    assert_eq!(
+        AliasGetterSetter::word_to_value(AliasType::Boolean, 1u16),
+        Variant::Boolean(true)
+    );
+    assert_eq!(
+        AliasGetterSetter::word_to_value(AliasType::Boolean, 3u16),
+        Variant::Boolean(true)
+    );

    // Tests that rely on bytes in the word, are expressed in little endian notation, created from using https://cryptii.com/pipes/integer-encoder
    // The intent is these tests should be able to run on other endian systems and still work.
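A note on the packing these asserts exercise: words_to_value above concatenates each register's big-endian bytes, high word first, so the register pair [0x449A, 0x522B] packs to the bit pattern 0x449A522B, which is IEEE-754 for roughly 1234.5678. A minimal, standalone sketch of that packing, assuming two holding registers carry the high and low halves of an f32 (names are illustrative):

fn words_to_f32(w: [u16; 2]) -> f32 {
    // Concatenate the big-endian bytes of both registers, high word first,
    // mirroring what word_2_to_bytes/words_to_value above do.
    let hi = w[0].to_be_bytes();
    let lo = w[1].to_be_bytes();
    f32::from_be_bytes([hi[0], hi[1], lo[0], lo[1]])
}

fn main() {
    let v = words_to_f32([0x449A, 0x522B]);
    // Same tolerance the values_2_words test below uses.
    assert!((v - 1234.5678).abs() < f32::EPSILON);
    println!("[0x449A, 0x522B] -> {}", v);
}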
// SByte - assert_eq!(AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0x81, 0xff])), Variant::SByte(-127i8)); - assert_eq!(AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0x80, 0xff])), Variant::SByte(-128i8)); - assert_eq!(AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0x7f, 0x00])), Variant::SByte(127i8)); - assert_eq!(AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0xff, 0x00])), Variant::SByte(127i8)); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0x81, 0xff])), + Variant::SByte(-127i8) + ); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0x80, 0xff])), + Variant::SByte(-128i8) + ); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0x7f, 0x00])), + Variant::SByte(127i8) + ); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::SByte, u16::from_le_bytes([0xff, 0x00])), + Variant::SByte(127i8) + ); // Int16 - assert_eq!(AliasGetterSetter::word_to_value(AliasType::Int16, u16::from_le_bytes([0x9f, 0xf0])), Variant::Int16(-3937)); - assert_eq!(AliasGetterSetter::word_to_value(AliasType::Int16, u16::from_le_bytes([0x00, 0x00])), Variant::Int16(0)); - assert_eq!(AliasGetterSetter::word_to_value(AliasType::Int16, u16::from_le_bytes([0x6f, 0x7d])), Variant::Int16(32111)); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::Int16, u16::from_le_bytes([0x9f, 0xf0])), + Variant::Int16(-3937) + ); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::Int16, u16::from_le_bytes([0x00, 0x00])), + Variant::Int16(0) + ); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::Int16, u16::from_le_bytes([0x6f, 0x7d])), + Variant::Int16(32111) + ); // UInt16 - assert_eq!(AliasGetterSetter::word_to_value(AliasType::UInt16, 26555), Variant::UInt16(26555)); + assert_eq!( + AliasGetterSetter::word_to_value(AliasType::UInt16, 26555), + Variant::UInt16(26555) + ); } #[test] fn values_2_words() { // UInt32 - assert_eq!(AliasGetterSetter::words_to_value(AliasType::UInt32, &[0x0000, 0x0001]), Variant::UInt32(1)); - assert_eq!(AliasGetterSetter::words_to_value(AliasType::UInt32, &[0x0001, 0x0000]), Variant::UInt32(0x00010000)); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::UInt32, &[0x0000, 0x0001]), + Variant::UInt32(1) + ); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::UInt32, &[0x0001, 0x0000]), + Variant::UInt32(0x00010000) + ); // Int32 - assert_eq!(AliasGetterSetter::words_to_value(AliasType::Int32, &[0xfffe, 0x1dc0]), Variant::Int32(-123456i32)); - assert_eq!(AliasGetterSetter::words_to_value(AliasType::Int32, &[0x3ade, 0x68b1]), Variant::Int32(987654321i32)); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::Int32, &[0xfffe, 0x1dc0]), + Variant::Int32(-123456i32) + ); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::Int32, &[0x3ade, 0x68b1]), + Variant::Int32(987654321i32) + ); // Float - assert_eq!(AliasGetterSetter::words_to_value(AliasType::Float, &[0x0000, 0x0000]), Variant::Float(0f32)); - assert_eq!(AliasGetterSetter::words_to_value(AliasType::Float, &[0x4400, 0x0000]), Variant::Float(512f32)); - if let Variant::Float(v) = AliasGetterSetter::words_to_value(AliasType::Float, &[0x449A, 0x522B]) { + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::Float, &[0x0000, 0x0000]), + Variant::Float(0f32) + ); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::Float, &[0x4400, 0x0000]), + Variant::Float(512f32) + 
); + if let Variant::Float(v) = + AliasGetterSetter::words_to_value(AliasType::Float, &[0x449A, 0x522B]) + { // Expect value to be 1234.5678 assert!((v - 1234.5678).abs() < f32::EPSILON); } else { @@ -559,17 +792,37 @@ fn values_2_words() { #[test] fn values_4_words() { // UInt64 - assert_eq!(AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0000, 0x0000, 0x0000, 0x0001]), Variant::UInt64(1)); - assert_eq!(AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0000, 0x0000, 0x0001, 0x0000]), Variant::UInt64(0x0000000000010000)); - assert_eq!(AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0123, 0x4567, 0x89AB, 0xCDEF]), Variant::UInt64(0x0123456789ABCDEF)); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0000, 0x0000, 0x0000, 0x0001]), + Variant::UInt64(1) + ); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0000, 0x0000, 0x0001, 0x0000]), + Variant::UInt64(0x0000000000010000) + ); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0123, 0x4567, 0x89AB, 0xCDEF]), + Variant::UInt64(0x0123456789ABCDEF) + ); // Int64 - assert_eq!(AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0123, 0x4567, 0x89AB, 0xCDEF]), Variant::UInt64(0x0123456789ABCDEF)); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::UInt64, &[0x0123, 0x4567, 0x89AB, 0xCDEF]), + Variant::UInt64(0x0123456789ABCDEF) + ); // Double - assert_eq!(AliasGetterSetter::words_to_value(AliasType::Double, &[0x0000, 0x0000, 0x0000, 0x0000]), Variant::Double(0f64)); - assert_eq!(AliasGetterSetter::words_to_value(AliasType::Double, &[0x4080, 0x0000, 0x0000, 0x0000]), Variant::Double(512f64)); - if let Variant::Double(v) = AliasGetterSetter::words_to_value(AliasType::Double, &[0x4093, 0x4A45, 0x6D5C, 0xFAAD]) { + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::Double, &[0x0000, 0x0000, 0x0000, 0x0000]), + Variant::Double(0f64) + ); + assert_eq!( + AliasGetterSetter::words_to_value(AliasType::Double, &[0x4080, 0x0000, 0x0000, 0x0000]), + Variant::Double(512f64) + ); + if let Variant::Double(v) = + AliasGetterSetter::words_to_value(AliasType::Double, &[0x4093, 0x4A45, 0x6D5C, 0xFAAD]) + { // Expect value to be 1234.5678 assert!((v - 1234.5678).abs() < f64::EPSILON); } else { diff --git a/samples/modbus-server/src/slave.rs b/samples/modbus-server/src/slave.rs index 1e79c3045..e8beb9bc3 100644 --- a/samples/modbus-server/src/slave.rs +++ b/samples/modbus-server/src/slave.rs @@ -2,10 +2,11 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use futures::{ - future::{self, FutureResult}, +use futures::future::{self, FutureResult}; +use std::{ + sync::{Arc, RwLock}, + thread, time, }; -use std::{thread, time, sync::{Arc, RwLock}}; use tokio_service::Service; use tokio_modbus::prelude::*; @@ -61,7 +62,6 @@ impl Service for MbServer { type Error = std::io::Error; type Future = FutureResult; - fn call(&self, req: Self::Request) -> Self::Future { self.update_values(); match req { @@ -76,7 +76,8 @@ impl Service for MbServer { let data = self.data.read().unwrap(); let start = addr as usize; let end = start + cnt as usize; - let rsp = Response::ReadHoldingRegisters(data.output_registers[start..end].to_vec()); + let rsp = + Response::ReadHoldingRegisters(data.output_registers[start..end].to_vec()); future::ok(rsp) } Request::ReadDiscreteInputs(addr, cnt) => { @@ -107,7 +108,10 @@ impl Service for MbServer { } Request::WriteMultipleRegisters(addr, words) => { let mut data = self.data.write().unwrap(); - 
words.iter().enumerate().for_each(|(i, w)| data.output_registers[addr as usize + i] = *w); + words + .iter() + .enumerate() + .for_each(|(i, w)| data.output_registers[addr as usize + i] = *w); let rsp = Response::WriteMultipleRegisters(addr, words.len() as u16); future::ok(rsp) } @@ -120,9 +124,11 @@ pub fn run_modbus_slave(address: &str) { let socket_addr = address.parse().unwrap(); println!("Starting up slave..."); let _server = thread::spawn(move || { - tcp::Server::new(socket_addr).serve(|| Ok(MbServer { - start_time: time::Instant::now(), - data: Arc::new(RwLock::new(Data::new())), - })); + tcp::Server::new(socket_addr).serve(|| { + Ok(MbServer { + start_time: time::Instant::now(), + data: Arc::new(RwLock::new(Data::new())), + }) + }); }); -} \ No newline at end of file +} diff --git a/samples/mqtt-client/src/main.rs b/samples/mqtt-client/src/main.rs index d4e151926..b03762a03 100644 --- a/samples/mqtt-client/src/main.rs +++ b/samples/mqtt-client/src/main.rs @@ -6,7 +6,7 @@ //! values before exiting. use std::{ path::PathBuf, - sync::{Arc, mpsc, Mutex, RwLock}, + sync::{mpsc, Arc, Mutex, RwLock}, thread, }; @@ -27,22 +27,32 @@ impl Args { let mut args = pico_args::Arguments::from_env(); Ok(Args { help: args.contains(["-h", "--help"]), - config: args.opt_value_from_str("--config")?.unwrap_or(String::from(DEFAULT_CONFIG_FILE)), - endpoint_id: args.opt_value_from_str("--config")?.unwrap_or(String::from("")), - host: args.opt_value_from_str("--host")?.unwrap_or(String::from(DEFAULT_MQTT_HOST)), - port: args.opt_value_from_str("--port")?.unwrap_or(DEFAULT_MQTT_PORT), + config: args + .opt_value_from_str("--config")? + .unwrap_or(String::from(DEFAULT_CONFIG_FILE)), + endpoint_id: args + .opt_value_from_str("--config")? + .unwrap_or(String::from("")), + host: args + .opt_value_from_str("--host")? + .unwrap_or(String::from(DEFAULT_MQTT_HOST)), + port: args + .opt_value_from_str("--port")? + .unwrap_or(DEFAULT_MQTT_PORT), }) } pub fn usage() { - println!(r#"MQTT client + println!( + r#"MQTT client Usage: -h, --help Show help --config file Sets the configuration file to read settings and endpoints from (default: {}) --endpoint-id id Sets the endpoint id from the config file to connect to --host host Address or name of the MQTT server to connect with (default: {}) --port port Port number of MQTT server to connect with (default: {})"#, - DEFAULT_CONFIG_FILE, DEFAULT_MQTT_HOST, DEFAULT_MQTT_PORT); + DEFAULT_CONFIG_FILE, DEFAULT_MQTT_HOST, DEFAULT_MQTT_PORT + ); } } @@ -50,7 +60,6 @@ const DEFAULT_CONFIG_FILE: &str = "../client.conf/"; const DEFAULT_MQTT_HOST: &str = "broker.hivemq.com"; const DEFAULT_MQTT_PORT: u16 = 1883; - // This client will do the following: // // 1. Read a configuration file (either default or the one specified using --config) @@ -60,8 +69,7 @@ const DEFAULT_MQTT_PORT: u16 = 1883; // 5. User can observe result on the broker (e.g. 
http://www.mqtt-dashboard.com/) fn main() -> Result<(), ()> { - let args = Args::parse_args() - .map_err(|_| Args::usage())?; + let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); } else { @@ -82,7 +90,10 @@ fn main() -> Result<(), ()> { loop { let (node_id, data_value) = rx.recv().unwrap(); - let topic = format!("opcua-rust/mqtt-client/{}/{}", node_id.namespace, node_id.identifier); + let topic = format!( + "opcua-rust/mqtt-client/{}/{}", + node_id.namespace, node_id.identifier + ); let value = if let Some(ref value) = data_value.value { format!("{:?}", value) } else { @@ -96,7 +107,11 @@ fn main() -> Result<(), ()> { // Use the sample client config to set up a client. The sample config has a number of named // endpoints one of which is marked as the default. let mut client = Client::new(ClientConfig::load(&PathBuf::from(config_file)).unwrap()); - let endpoint_id: Option<&str> = if !endpoint_id.is_empty() { Some(&endpoint_id) } else { None }; + let endpoint_id: Option<&str> = if !endpoint_id.is_empty() { + Some(&endpoint_id) + } else { + None + }; let ns = 2; if let Ok(session) = client.connect_to_endpoint_id(endpoint_id) { let _ = subscription_loop(session, tx, ns).map_err(|err| { @@ -107,7 +122,11 @@ fn main() -> Result<(), ()> { Ok(()) } -fn subscription_loop(session: Arc>, tx: mpsc::Sender<(NodeId, DataValue)>, ns: u16) -> Result<(), StatusCode> { +fn subscription_loop( + session: Arc>, + tx: mpsc::Sender<(NodeId, DataValue)>, + ns: u16, +) -> Result<(), StatusCode> { // Create a subscription println!("Creating subscription"); @@ -119,21 +138,35 @@ fn subscription_loop(session: Arc>, tx: mpsc::Sender<(NodeId, Da // Creates our subscription - one update every second. The update is sent as a message // to the MQTT thread to be published. let tx = Arc::new(Mutex::new(tx)); - let subscription_id = session.create_subscription(1000f64, 10, 30, 0, 0, true, DataChangeCallback::new(move |items| { - println!("Data change from server:"); - let tx = tx.lock().unwrap(); - items.iter().for_each(|item| { - let node_id = item.item_to_monitor().node_id.clone(); - let value = item.value().clone(); - let _ = tx.send((node_id, value)); - }); - }))?; + let subscription_id = session.create_subscription( + 1000f64, + 10, + 30, + 0, + 0, + true, + DataChangeCallback::new(move |items| { + println!("Data change from server:"); + let tx = tx.lock().unwrap(); + items.iter().for_each(|item| { + let node_id = item.item_to_monitor().node_id.clone(); + let value = item.value().clone(); + let _ = tx.send((node_id, value)); + }); + }), + )?; println!("Created a subscription with id = {}", subscription_id); // Create some monitored items - let items_to_create: Vec = ["v1", "v2", "v3", "v4"].iter() - .map(|v| NodeId::new(ns, *v).into()).collect(); - let _ = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create)?; + let items_to_create: Vec = ["v1", "v2", "v3", "v4"] + .iter() + .map(|v| NodeId::new(ns, *v).into()) + .collect(); + let _ = session.create_monitored_items( + subscription_id, + TimestampsToReturn::Both, + &items_to_create, + )?; } // Loops forever. 
The publish thread will call the callback with changes on the variables
diff --git a/samples/simple-client/src/main.rs b/samples/simple-client/src/main.rs
index f3b478712..7110de2c5 100644
--- a/samples/simple-client/src/main.rs
+++ b/samples/simple-client/src/main.rs
@@ -21,15 +21,20 @@ impl Args {
        let mut args = pico_args::Arguments::from_env();
        Ok(Args {
            help: args.contains(["-h", "--help"]),
-            url: args.opt_value_from_str("--url")?.unwrap_or(String::from(DEFAULT_URL)),
+            url: args
+                .opt_value_from_str("--url")?
+                .unwrap_or(String::from(DEFAULT_URL)),
        })
    }

    pub fn usage() {
-        println!(r#"Simple Client
+        println!(
+            r#"Simple Client
Usage:
  -h, --help   Show help
-  --url [url]  Url to connect to (default: {})"#, DEFAULT_URL);
+  --url [url]  Url to connect to (default: {})"#,
+            DEFAULT_URL
+        );
    }
}

@@ -37,8 +42,7 @@
const DEFAULT_URL: &str = "opc.tcp://localhost:4855";

fn main() -> Result<(), ()> {
    // Read command line arguments
-    let args = Args::parse_args()
-        .map_err(|_| Args::usage())?;
+    let args = Args::parse_args().map_err(|_| Args::usage())?;
    if args.help {
        Args::usage();
    } else {
@@ -52,11 +56,23 @@ fn main() -> Result<(), ()> {
            .trust_server_certs(true)
            .create_sample_keypair(true)
            .session_retry_limit(3)
-            .client().unwrap();
+            .client()
+            .unwrap();

-        if let Ok(session) = client.connect_to_endpoint((args.url.as_ref(), SecurityPolicy::None.to_str(), MessageSecurityMode::None, UserTokenPolicy::anonymous()), IdentityToken::Anonymous) {
+        if let Ok(session) = client.connect_to_endpoint(
+            (
+                args.url.as_ref(),
+                SecurityPolicy::None.to_str(),
+                MessageSecurityMode::None,
+                UserTokenPolicy::anonymous(),
+            ),
+            IdentityToken::Anonymous,
+        ) {
            if let Err(result) = subscribe_to_variables(session.clone(), 2) {
-                println!("ERROR: Got an error while subscribing to variables - {}", result);
+                println!(
+                    "ERROR: Got an error while subscribing to variables - {}",
+                    result
+                );
            } else {
                // Loops forever. The publish thread will call the callback with changes on the variables
                let _ = Session::run(session);
@@ -69,16 +85,32 @@ fn main() -> Result<(), ()> {
fn subscribe_to_variables(session: Arc<RwLock<Session>>, ns: u16) -> Result<(), StatusCode> {
    let mut session = session.write().unwrap();
    // Creates a subscription with a data change callback
-    let subscription_id = session.create_subscription(2000.0, 10, 30, 0, 0, true, DataChangeCallback::new(|changed_monitored_items| {
-        println!("Data change from server:");
-        changed_monitored_items.iter().for_each(|item| print_value(item));
-    }))?;
+    let subscription_id = session.create_subscription(
+        2000.0,
+        10,
+        30,
+        0,
+        0,
+        true,
+        DataChangeCallback::new(|changed_monitored_items| {
+            println!("Data change from server:");
+            changed_monitored_items
+                .iter()
+                .for_each(|item| print_value(item));
+        }),
+    )?;
    println!("Created a subscription with id = {}", subscription_id);

    // Create some monitored items
-    let items_to_create: Vec<MonitoredItemCreateRequest> = ["v1", "v2", "v3", "v4"].iter()
-        .map(|v| NodeId::new(ns, *v).into()).collect();
-    let _ = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create)?;
+    let items_to_create: Vec<MonitoredItemCreateRequest> = ["v1", "v2", "v3", "v4"]
+        .iter()
+        .map(|v| NodeId::new(ns, *v).into())
+        .collect();
+    let _ = session.create_monitored_items(
+        subscription_id,
+        TimestampsToReturn::Both,
+        &items_to_create,
+    )?;

    Ok(())
}
@@ -89,6 +121,10 @@ fn print_value(item: &MonitoredItem) {
    if let Some(ref value) = data_value.value {
        println!("Item \"{}\", Value = {:?}", node_id, value);
    } else {
-        println!("Item \"{}\", Value not found, error: {}", node_id, data_value.status.as_ref().unwrap());
+        println!(
+            "Item \"{}\", Value not found, error: {}",
+            node_id,
+            data_value.status.as_ref().unwrap()
+        );
    }
-}
\ No newline at end of file
+}
diff --git a/samples/simple-server/src/main.rs b/samples/simple-server/src/main.rs
index 9d411ea2d..4fc6c39b1 100644
--- a/samples/simple-server/src/main.rs
+++ b/samples/simple-server/src/main.rs
@@ -21,7 +21,9 @@ fn main() {
    let ns = {
        let address_space = server.address_space();
        let mut address_space = address_space.write().unwrap();
-        address_space.register_namespace("urn:simple-server").unwrap()
+        address_space
+            .register_namespace("urn:simple-server")
+            .unwrap()
    };

    // Add some variables of our own
@@ -52,17 +54,19 @@ fn add_example_variables(server: &mut Server, ns: u16) {

        // Add some variables to our sample folder. Values will be overwritten by the timer
        let _ = address_space.add_variables(
-            vec![Variable::new(&v1_node, "v1", "v1", 0 as i32),
-                 Variable::new(&v2_node, "v2", "v2", false),
-                 Variable::new(&v3_node, "v3", "v3", UAString::from("")),
-                 Variable::new(&v4_node, "v4", "v4", 0f64)],
-            &sample_folder_id);
+            vec![
+                Variable::new(&v1_node, "v1", "v1", 0 as i32),
+                Variable::new(&v2_node, "v2", "v2", false),
+                Variable::new(&v3_node, "v3", "v3", UAString::from("")),
+                Variable::new(&v4_node, "v4", "v4", 0f64),
+            ],
+            &sample_folder_id,
+        );
    }

    // OPC UA for Rust allows you to push or pull values from a variable so here are examples
    // of each method.
-
    // 1) Pull. This code will add getters to v3 & v4 that returns their values by calling
    // function.
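The getter closures in the block below ignore their six arguments with move |_, _, _, _, _, _|. As a reading aid, a hedged sketch of the same pull getter with the arguments named; the roles (node id, timestamps to return, attribute id, index range, data encoding, max age) mirror the AttributeGetter::get signature used by the modbus sample above, and the constant value is purely illustrative:

// Illustrative sketch only: a pull getter with named closure arguments.
let getter = AttrFnGetter::new(
    // Assumed parameter roles, in order: node id, timestamps to return,
    // attribute id, index range, data encoding, max age.
    move |_node_id, _timestamps_to_return, _attribute_id, _index_range, _data_encoding, _max_age|
          -> Result<Option<DataValue>, StatusCode> {
        // Return a constant; a real getter computes or looks up the value.
        Ok(Some(DataValue::new_now(42i32)))
    },
);
v.set_value_getter(Arc::new(Mutex::new(getter)));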
{ @@ -71,23 +75,32 @@ fn add_example_variables(server: &mut Server, ns: u16) { if let Some(ref mut v) = address_space.find_variable_mut(v3_node.clone()) { // Hello world's counter will increment with each get - slower interval == slower increment let mut counter = 0; - let getter = AttrFnGetter::new(move |_, _, _, _, _, _| -> Result, StatusCode> { - counter += 1; - Ok(Some(DataValue::new_now(UAString::from(format!("Hello World times {}", counter))))) - }); + let getter = AttrFnGetter::new( + move |_, _, _, _, _, _| -> Result, StatusCode> { + counter += 1; + Ok(Some(DataValue::new_now(UAString::from(format!( + "Hello World times {}", + counter + ))))) + }, + ); v.set_value_getter(Arc::new(Mutex::new(getter))); } if let Some(ref mut v) = address_space.find_variable_mut(v4_node.clone()) { // Sine wave draws 2*PI over course of 10 seconds - use std::f64::consts; use chrono::Utc; + use std::f64::consts; let start_time = Utc::now(); - let getter = AttrFnGetter::new(move |_, _, _, _, _, _| -> Result, StatusCode> { - let elapsed = Utc::now().signed_duration_since(start_time).num_milliseconds(); - let moment = (elapsed % 10000) as f64 / 10000.0; - Ok(Some(DataValue::new_now((2.0 * consts::PI * moment).sin()))) - }); + let getter = AttrFnGetter::new( + move |_, _, _, _, _, _| -> Result, StatusCode> { + let elapsed = Utc::now() + .signed_duration_since(start_time) + .num_milliseconds(); + let moment = (elapsed % 10000) as f64 / 10000.0; + Ok(Some(DataValue::new_now((2.0 * consts::PI * moment).sin()))) + }, + ); v.set_value_getter(Arc::new(Mutex::new(getter))); } } @@ -108,4 +121,4 @@ fn add_example_variables(server: &mut Server, ns: u16) { let _ = address_space.set_variable_value(v2_node.clone(), data.1, &now, &now); }); } -} \ No newline at end of file +} diff --git a/samples/web-client/src/main.rs b/samples/web-client/src/main.rs index 9972643c3..eabeb340b 100644 --- a/samples/web-client/src/main.rs +++ b/samples/web-client/src/main.rs @@ -7,21 +7,19 @@ extern crate serde_derive; use std::{ str::FromStr, - sync::{Arc, mpsc, RwLock}, + sync::{mpsc, Arc, RwLock}, time::{Duration, Instant}, }; use actix_web::{ - actix::{Actor, ActorContext, AsyncContext, Handler, Message, Running, StreamHandler}, App, Error, - fs, http, HttpRequest, HttpResponse, + actix::{Actor, ActorContext, AsyncContext, Handler, Message, Running, StreamHandler}, + fs, http, server::HttpServer, - ws, + ws, App, Error, HttpRequest, HttpResponse, }; use serde_json; -use opcua_client::{ - prelude::*, -}; +use opcua_client::prelude::*; struct Args { help: bool, @@ -33,23 +31,27 @@ impl Args { let mut args = pico_args::Arguments::from_env(); Ok(Args { help: args.contains(["-h", "--help"]), - http_port: args.opt_value_from_str("--http-port")?.unwrap_or(DEFAULT_HTTP_PORT), + http_port: args + .opt_value_from_str("--http-port")? 
+ .unwrap_or(DEFAULT_HTTP_PORT), }) } pub fn usage() { - println!(r#"Web Client + println!( + r#"Web Client Usage: -h, --help Show help - --http-port The port number that this web server will run from (default: {})"#, DEFAULT_HTTP_PORT); + --http-port The port number that this web server will run from (default: {})"#, + DEFAULT_HTTP_PORT + ); } } const DEFAULT_HTTP_PORT: u16 = 8686; fn main() -> Result<(), ()> { - let args = Args::parse_args() - .map_err(|_| Args::usage())?; + let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help { Args::usage(); } else { @@ -117,11 +119,15 @@ impl Handler for OPCUASession { fn handle(&mut self, msg: Event, ctx: &mut Self::Context) { // This is where we receive OPC UA events. It is here they are turned into JSON // and sent to the attached web socket. - println!("Received event {}", match &msg { - Event::ConnectionStatusChange(ref connected) => format!("ConnectionStatusChangeEvent({})", connected), - Event::DataChange(_) => "DataChangeEvent".to_string(), - Event::Event(_) => "Event".to_string() - }); + println!( + "Received event {}", + match &msg { + Event::ConnectionStatusChange(ref connected) => + format!("ConnectionStatusChangeEvent({})", connected), + Event::DataChange(_) => "DataChangeEvent".to_string(), + Event::Event(_) => "Event".to_string(), + } + ); ctx.text(serde_json::to_string(&msg).unwrap()) } } @@ -147,7 +153,8 @@ impl StreamHandler for OPCUASession { self.disconnect(ctx); } else if msg.starts_with("subscribe ") { // Node ids are comma separated - let node_ids: Vec = msg[10..].split(",").map(|s| s.to_string()).collect(); + let node_ids: Vec = + msg[10..].split(",").map(|s| s.to_string()).collect(); self.subscribe(ctx, node_ids); println!("subscription complete"); } else if msg.starts_with("add_event ") { @@ -181,15 +188,33 @@ impl OPCUASession { self.disconnect(ctx); let addr = ctx.address(); - let connected = match self.client.connect_to_endpoint((opcua_url, SecurityPolicy::None.to_str(), MessageSecurityMode::None, UserTokenPolicy::anonymous()), IdentityToken::Anonymous) { + let connected = match self.client.connect_to_endpoint( + ( + opcua_url, + SecurityPolicy::None.to_str(), + MessageSecurityMode::None, + UserTokenPolicy::anonymous(), + ), + IdentityToken::Anonymous, + ) { Ok(session) => { { let mut session = session.write().unwrap(); let addr_for_connection_status_change = addr.clone(); - session.set_connection_status_callback(ConnectionStatusCallback::new(move |connected| { - println!("Connection status has changed to {}", if connected { "connected" } else { "disconnected" }); - addr_for_connection_status_change.do_send(Event::ConnectionStatusChange(connected)); - })); + session.set_connection_status_callback(ConnectionStatusCallback::new( + move |connected| { + println!( + "Connection status has changed to {}", + if connected { + "connected" + } else { + "disconnected" + } + ); + addr_for_connection_status_change + .do_send(Event::ConnectionStatusChange(connected)); + }, + )); session.set_session_closed_callback(SessionClosedCallback::new(|status| { println!("Session has been closed, status = {}", status); })); @@ -199,7 +224,10 @@ impl OPCUASession { true } Err(err) => { - println!("ERROR: Got an error while trying to connect to session - {}", err); + println!( + "ERROR: Got an error while trying to connect to session - {}", + err + ); false } }; @@ -222,7 +250,12 @@ impl OPCUASession { } fn lhs_operand(op: &str) -> Operand { - Operand::simple_attribute(ReferenceTypeId::Organizes, op, AttributeId::Value, 
UAString::null()) + Operand::simple_attribute( + ReferenceTypeId::Organizes, + op, + AttributeId::Value, + UAString::null(), + ) } fn rhs_operand(op: &str, lhs: &str) -> Option { @@ -232,13 +265,18 @@ impl OPCUASession { // Treat as a browse path to an event // ObjectTypeId::BaseEventType let base_event_type = NodeId::from((0, 2041)); - Some(Operand::simple_attribute(base_event_type, op, AttributeId::Value, UAString::null())) + Some(Operand::simple_attribute( + base_event_type, + op, + AttributeId::Value, + UAString::null(), + )) } else { // A couple of lhs values should be parsed to types other than a string match lhs { // "SourceNode" => NodeId::from_str(op).map(|v| Operand::literal(v)).ok(), // "Severity" => u16::from_str(op).map(|v| Operand::literal(v)).ok(), - op => Some(Operand::literal(op)) + op => Some(Operand::literal(op)), } } } @@ -262,9 +300,7 @@ impl OPCUASession { let where_clause = ""; // TODO remove let where_clause = if where_clause.is_empty() { - ContentFilter { - elements: None, - } + ContentFilter { elements: None } } else { let where_parts = where_clause.split("|").collect::>(); if where_parts.len() != 3 { @@ -299,19 +335,25 @@ impl OPCUASession { // Where clause ContentFilter { - elements: Some(vec![ContentFilterElement::from((operator, vec![lhs, rhs.unwrap()]))]), + elements: Some(vec![ContentFilterElement::from(( + operator, + vec![lhs, rhs.unwrap()], + ))]), } }; // Select clauses - let select_clauses = Some(select_criteria.split("|").map(|s| { - SimpleAttributeOperand { - type_definition_id: ObjectTypeId::BaseEventType.into(), - browse_path: Some(vec![QualifiedName::from(s)]), - attribute_id: AttributeId::Value as u32, - index_range: UAString::null(), - } - }).collect()); + let select_clauses = Some( + select_criteria + .split("|") + .map(|s| SimpleAttributeOperand { + type_definition_id: ObjectTypeId::BaseEventType.into(), + browse_path: Some(vec![QualifiedName::from(s)]), + attribute_id: AttributeId::Value as u32, + index_range: UAString::null(), + }) + .collect(), + ); let event_filter = EventFilter { where_clause, @@ -329,12 +371,21 @@ impl OPCUASession { }); // create a subscription containing events - if let Ok(subscription_id) = session.create_subscription(2000.0, 100, 300, 0, 0, true, event_callback) { + if let Ok(subscription_id) = + session.create_subscription(2000.0, 100, 300, 0, 0, true, event_callback) + { // Monitor the item for events let mut item_to_create: MonitoredItemCreateRequest = event_node_id.into(); item_to_create.item_to_monitor.attribute_id = AttributeId::EventNotifier as u32; - item_to_create.requested_parameters.filter = ExtensionObject::from_encodable(ObjectId::EventFilter_Encoding_DefaultBinary, &event_filter); - if let Ok(result) = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &vec![item_to_create]) { + item_to_create.requested_parameters.filter = ExtensionObject::from_encodable( + ObjectId::EventFilter_Encoding_DefaultBinary, + &event_filter, + ); + if let Ok(result) = session.create_monitored_items( + subscription_id, + TimestampsToReturn::Both, + &vec![item_to_create], + ) { println!("Result of subscribing to event = {:?}", result); } else { println!("Cannot create monitored event!"); @@ -357,26 +408,38 @@ impl OPCUASession { let data_change_callback = DataChangeCallback::new(move |items| { // Changes will be turned into a list of change events that sent to corresponding // web socket to be sent to the client. 
- let changes = items.iter().map(|item| { - let item_to_monitor = item.item_to_monitor(); - DataChangeEvent { - node_id: item_to_monitor.node_id.clone().into(), - attribute_id: item_to_monitor.attribute_id, - value: item.value().clone(), - } - }).collect::>(); + let changes = items + .iter() + .map(|item| { + let item_to_monitor = item.item_to_monitor(); + DataChangeEvent { + node_id: item_to_monitor.node_id.clone().into(), + attribute_id: item_to_monitor.attribute_id, + value: item.value().clone(), + } + }) + .collect::>(); // Send the changes to the websocket session addr_for_datachange.do_send(Event::DataChange(changes)); }); - if let Ok(subscription_id) = session.create_subscription(500.0, 10, 30, 0, 0, true, data_change_callback) { + if let Ok(subscription_id) = + session.create_subscription(500.0, 10, 30, 0, 0, true, data_change_callback) + { println!("Created a subscription with id = {}", subscription_id); // Create some monitored items - let items_to_create: Vec = node_ids.iter().map(|node_id| { - let node_id = NodeId::from_str(node_id).unwrap(); // Trust client to not break this - node_id.into() - }).collect(); - if let Ok(_results) = session.create_monitored_items(subscription_id, TimestampsToReturn::Both, &items_to_create) { + let items_to_create: Vec = node_ids + .iter() + .map(|node_id| { + let node_id = NodeId::from_str(node_id).unwrap(); // Trust client to not break this + node_id.into() + }) + .collect(); + if let Ok(_results) = session.create_monitored_items( + subscription_id, + TimestampsToReturn::Both, + &items_to_create, + ) { println!("Created monitored items"); } else { println!("Cannot create monitored items!"); @@ -396,14 +459,18 @@ fn ws_create_request(r: &HttpRequest) -> Result { - $a.find_node($id).and_then(|node| { - match node { - NodeType::$node_type(ref node) => Some(node.as_ref()), - _ => None - } + ($a: expr, $id: expr, $node_type: ident) => { + $a.find_node($id).and_then(|node| match node { + NodeType::$node_type(ref node) => Some(node.as_ref()), + _ => None, }) - } + }; } /// Finds a node in the address space and coerces it into a mutable reference of the expected node type. macro_rules! find_node_mut { - ($a: expr, $id: expr, $node_type: ident) => { - $a.find_node_mut($id).and_then(|node| { - match node { - NodeType::$node_type(ref mut node) => Some(node.as_mut()), - _ => None - } + ($a: expr, $id: expr, $node_type: ident) => { + $a.find_node_mut($id).and_then(|node| match node { + NodeType::$node_type(ref mut node) => Some(node.as_mut()), + _ => None, }) - } + }; } /// Searches for the specified node by type, expecting it to exist macro_rules! expect_and_find_node { ($a: expr, $id: expr, $node_type: ident) => { - find_node!($a, $id, $node_type).or_else(|| { - panic!("There should be a node of id {:?}!", $id); - }).unwrap() - } + find_node!($a, $id, $node_type) + .or_else(|| { + panic!("There should be a node of id {:?}!", $id); + }) + .unwrap() + }; } /// Searches for the specified object node, expecting it to exist macro_rules! expect_and_find_object { ($a: expr, $id: expr) => { expect_and_find_node!($a, $id, Object) - } + }; } /// Tests if the node of the expected type exists @@ -83,39 +80,46 @@ macro_rules! is_node { } else { false } - } + }; } /// Tests if the object node exists macro_rules! is_object { ($a: expr, $id: expr) => { is_node!($a, $id, Object) - } + }; } /// Tests if the method node exists macro_rules! is_method { ($a: expr, $id: expr) => { is_node!($a, $id, Method) - } + }; } /// Gets a field from the live diagnostics table. 
macro_rules! server_diagnostics_summary { ($address_space: expr, $variable_id: expr, $field: ident) => { let server_diagnostics = $address_space.server_diagnostics.as_ref().unwrap().clone(); - $address_space.set_variable_getter($variable_id, move |_, timestamps_to_return, _, _, _, _| { - let server_diagnostics = server_diagnostics.read().unwrap(); - let server_diagnostics_summary = server_diagnostics.server_diagnostics_summary(); - - debug!("Request to get server diagnostics field {}, value = {}", stringify!($variable_id), server_diagnostics_summary.$field); - - let mut value = DataValue::from(Variant::from(server_diagnostics_summary.$field)); - let now = DateTime::now(); - value.set_timestamps(timestamps_to_return, now.clone(), now); - Ok(Some(value)) - }); - } + $address_space.set_variable_getter( + $variable_id, + move |_, timestamps_to_return, _, _, _, _| { + let server_diagnostics = server_diagnostics.read().unwrap(); + let server_diagnostics_summary = server_diagnostics.server_diagnostics_summary(); + + debug!( + "Request to get server diagnostics field {}, value = {}", + stringify!($variable_id), + server_diagnostics_summary.$field + ); + + let mut value = DataValue::from(Variant::from(server_diagnostics_summary.$field)); + let now = DateTime::now(); + value.set_timestamps(timestamps_to_return, now.clone(), now); + Ok(Some(value)) + }, + ); + }; } pub(crate) type MethodCallback = Box; @@ -235,16 +239,24 @@ impl AddressSpace { /// Finds the namespace index of a given namespace pub fn namespace_index(&self, namespace: &str) -> Option { - self.namespaces.iter().position(|ns| { - let ns: &str = ns.as_ref(); - ns == namespace - }).map(|i| i as u16) + self.namespaces + .iter() + .position(|ns| { + let ns: &str = ns.as_ref(); + ns == namespace + }) + .map(|i| i as u16) } fn set_servers(&mut self, server_state: Arc>, now: &DateTime) { let server_state = trace_read_lock_unwrap!(server_state); if let Some(ref mut v) = self.find_variable_mut(Server_ServerArray) { - let _ = v.set_value_direct(Variant::from(&server_state.servers), StatusCode::Good, now, now); + let _ = v.set_value_direct( + Variant::from(&server_state.servers), + StatusCode::Good, + now, + now, + ); } } @@ -263,7 +275,8 @@ impl AddressSpace { /// Sets values for nodes representing the server. 
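// Illustrative sketch, not definitive: roughly what one server_diagnostics_summary! invocation
// (used further down inside set_server_state) expands to, here for the server_view_count field.
// `address_space` and `server_diagnostics` stand in for the macro arguments and are assumed in scope.
address_space.set_variable_getter(
    Server_ServerDiagnostics_ServerDiagnosticsSummary_ServerViewCount,
    move |_, timestamps_to_return, _, _, _, _| {
        let server_diagnostics = server_diagnostics.read().unwrap();
        let summary = server_diagnostics.server_diagnostics_summary();
        let mut value = DataValue::from(Variant::from(summary.server_view_count));
        let now = DateTime::now();
        value.set_timestamps(timestamps_to_return, now.clone(), now);
        Ok(Some(value))
    },
);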
pub fn set_server_state(&mut self, server_state: Arc>) { // Server state requires the generated address space, otherwise nothing - #[cfg(feature = "generated-address-space")] { + #[cfg(feature = "generated-address-space")] + { let now = DateTime::now(); // Servers @@ -280,33 +293,134 @@ impl AddressSpace { { let server_state = trace_read_lock_unwrap!(server_state); let server_config = trace_read_lock_unwrap!(server_state.config); - self.set_variable_value(Server_ServerCapabilities_MaxArrayLength, server_config.limits.max_array_length as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_MaxStringLength, server_config.limits.max_string_length as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_MaxByteStringLength, server_config.limits.max_byte_string_length as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_MaxBrowseContinuationPoints, constants::MAX_BROWSE_CONTINUATION_POINTS as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_MaxHistoryContinuationPoints, constants::MAX_HISTORY_CONTINUATION_POINTS as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_MaxQueryContinuationPoints, constants::MAX_QUERY_CONTINUATION_POINTS as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_MinSupportedSampleRate, constants::MIN_SAMPLING_INTERVAL as f64, &now, &now); - let locale_ids: Vec = server_config.locale_ids.iter().map(|v| UAString::from(v).into()).collect(); - self.set_variable_value(Server_ServerCapabilities_LocaleIdArray, locale_ids, &now, &now); + self.set_variable_value( + Server_ServerCapabilities_MaxArrayLength, + server_config.limits.max_array_length as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_MaxStringLength, + server_config.limits.max_string_length as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_MaxByteStringLength, + server_config.limits.max_byte_string_length as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_MaxBrowseContinuationPoints, + constants::MAX_BROWSE_CONTINUATION_POINTS as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_MaxHistoryContinuationPoints, + constants::MAX_HISTORY_CONTINUATION_POINTS as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_MaxQueryContinuationPoints, + constants::MAX_QUERY_CONTINUATION_POINTS as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_MinSupportedSampleRate, + constants::MIN_SAMPLING_INTERVAL as f64, + &now, + &now, + ); + let locale_ids: Vec = server_config + .locale_ids + .iter() + .map(|v| UAString::from(v).into()) + .collect(); + self.set_variable_value( + Server_ServerCapabilities_LocaleIdArray, + locale_ids, + &now, + &now, + ); let ol = &server_state.operational_limits; - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerRead, ol.max_nodes_per_read as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerWrite, ol.max_nodes_per_write as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerMethodCall, ol.max_nodes_per_method_call as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerBrowse, ol.max_nodes_per_browse as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerRegisterNodes, ol.max_nodes_per_register_nodes as 
u32, &now, &now); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerRead, + ol.max_nodes_per_read as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerWrite, + ol.max_nodes_per_write as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerMethodCall, + ol.max_nodes_per_method_call as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerBrowse, + ol.max_nodes_per_browse as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerRegisterNodes, + ol.max_nodes_per_register_nodes as u32, + &now, + &now, + ); self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerTranslateBrowsePathsToNodeIds, ol.max_nodes_per_translate_browse_paths_to_node_ids as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerNodeManagement, ol.max_nodes_per_node_management as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxMonitoredItemsPerCall, ol.max_monitored_items_per_call as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadData, ol.max_nodes_per_history_read_data as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadEvents, ol.max_nodes_per_history_read_events as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateData, ol.max_nodes_per_history_update_data as u32, &now, &now); - self.set_variable_value(Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateEvents, ol.max_nodes_per_history_update_events as u32, &now, &now); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerNodeManagement, + ol.max_nodes_per_node_management as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxMonitoredItemsPerCall, + ol.max_monitored_items_per_call as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadData, + ol.max_nodes_per_history_read_data as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryReadEvents, + ol.max_nodes_per_history_read_events as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateData, + ol.max_nodes_per_history_update_data as u32, + &now, + &now, + ); + self.set_variable_value( + Server_ServerCapabilities_OperationLimits_MaxNodesPerHistoryUpdateEvents, + ol.max_nodes_per_history_update_events as u32, + &now, + &now, + ); } // Server_ServerCapabilities_ServerProfileArray - if let Some(ref mut v) = self.find_variable_mut(Server_ServerCapabilities_ServerProfileArray) { + if let Some(ref mut v) = + self.find_variable_mut(Server_ServerCapabilities_ServerProfileArray) + { // Declares what the server implements. Subitems are implied by the profile. 
A subitem // marked - is optional to the spec let server_profiles = [ @@ -366,7 +480,6 @@ impl AddressSpace { // Security // Security Default ApplicationInstanceCertificate - has a default ApplicationInstanceCertificate that is valid "http://opcfoundation.org/UA-Profile/Server/EmbeddedUA", - // TODO server profile // Standard UA Server Profile // Enhanced DataChange Subscription Server Facet @@ -389,7 +502,12 @@ impl AddressSpace { // // "http://opcfoundation.org/UA-Profile/Server/StandardUA", ]; - let _ = v.set_value_direct(Variant::from(&server_profiles[..]), StatusCode::Good, &now, &now); + let _ = v.set_value_direct( + Variant::from(&server_profiles[..]), + StatusCode::Good, + &now, + &now, + ); } // Server_ServerDiagnostics_ServerDiagnosticsSummary @@ -399,18 +517,66 @@ impl AddressSpace { { let server_state = trace_read_lock_unwrap!(server_state); self.server_diagnostics = Some(server_state.diagnostics.clone()); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_ServerViewCount, server_view_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_CurrentSessionCount, current_session_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_CumulatedSessionCount, cumulated_session_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_SecurityRejectedSessionCount, security_rejected_session_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_SessionTimeoutCount, session_timeout_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_SessionAbortCount, session_abort_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_RejectedSessionCount, rejected_session_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_PublishingIntervalCount, publishing_interval_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_CurrentSubscriptionCount, current_subscription_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_CumulatedSubscriptionCount, cumulated_subscription_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_SecurityRejectedRequestsCount, security_rejected_requests_count); - server_diagnostics_summary!(self, Server_ServerDiagnostics_ServerDiagnosticsSummary_RejectedRequestsCount, rejected_requests_count); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_ServerViewCount, + server_view_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_CurrentSessionCount, + current_session_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_CumulatedSessionCount, + cumulated_session_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_SecurityRejectedSessionCount, + security_rejected_session_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_SessionTimeoutCount, + session_timeout_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_SessionAbortCount, + session_abort_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_RejectedSessionCount, + 
rejected_session_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_PublishingIntervalCount, + publishing_interval_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_CurrentSubscriptionCount, + current_subscription_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_CumulatedSubscriptionCount, + cumulated_subscription_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_SecurityRejectedRequestsCount, + security_rejected_requests_count + ); + server_diagnostics_summary!( + self, + Server_ServerDiagnostics_ServerDiagnosticsSummary_RejectedRequestsCount, + rejected_requests_count + ); } // ServiceLevel - 0-255 worst to best quality of service @@ -425,22 +591,28 @@ impl AddressSpace { self.set_variable_value(Server_ServerStatus_StartTime, now.clone(), &now, &now); // Server_ServerStatus_CurrentTime - self.set_variable_getter(Server_ServerStatus_CurrentTime, move |_, timestamps_to_return, _, _, _, _| { - let now = DateTime::now(); - let mut value = DataValue::from(now.clone()); - value.set_timestamps(timestamps_to_return, now.clone(), now); - Ok(Some(value)) - }); + self.set_variable_getter( + Server_ServerStatus_CurrentTime, + move |_, timestamps_to_return, _, _, _, _| { + let now = DateTime::now(); + let mut value = DataValue::from(now.clone()); + value.set_timestamps(timestamps_to_return, now.clone(), now); + Ok(Some(value)) + }, + ); // State OPC UA Part 5 12.6, Valid states are // State (Server_ServerStatus_State) - self.set_variable_getter(Server_ServerStatus_State, move |_, timestamps_to_return, _, _, _, _| { - // let server_state = trace_read_lock_unwrap!(server_state); - let now = DateTime::now(); - let mut value = DataValue::from(0i32); - value.set_timestamps(timestamps_to_return, now.clone(), now); - Ok(Some(value)) - }); + self.set_variable_getter( + Server_ServerStatus_State, + move |_, timestamps_to_return, _, _, _, _| { + // let server_state = trace_read_lock_unwrap!(server_state); + let now = DateTime::now(); + let mut value = DataValue::from(0i32); + value.set_timestamps(timestamps_to_return, now.clone(), now); + Ok(Some(value)) + }, + ); // ServerStatus_BuildInfo { @@ -454,27 +626,98 @@ impl AddressSpace { // Server method handlers use crate::address_space::method_impls; - self.register_method_handler(MethodId::Server_ResendData, Box::new(method_impls::ServerResendDataMethod)); - self.register_method_handler(MethodId::Server_GetMonitoredItems, Box::new(method_impls::ServerGetMonitoredItemsMethod)); + self.register_method_handler( + MethodId::Server_ResendData, + Box::new(method_impls::ServerResendDataMethod), + ); + self.register_method_handler( + MethodId::Server_GetMonitoredItems, + Box::new(method_impls::ServerGetMonitoredItemsMethod), + ); } } /// Sets the history server capabilities based on the supplied flags pub fn set_history_server_capabilities(&mut self, capabilities: &HistoryServerCapabilities) { let now = DateTime::now(); - self.set_variable_value(HistoryServerCapabilities_AccessHistoryDataCapability, capabilities.access_history_data, &now, &now); - self.set_variable_value(HistoryServerCapabilities_AccessHistoryEventsCapability, capabilities.access_history_events, &now, &now); - self.set_variable_value(HistoryServerCapabilities_MaxReturnDataValues, capabilities.max_return_data, &now, &now); - self.set_variable_value(HistoryServerCapabilities_MaxReturnEventValues, 
capabilities.max_return_events, &now, &now); - self.set_variable_value(HistoryServerCapabilities_InsertDataCapability, capabilities.insert_data, &now, &now); - self.set_variable_value(HistoryServerCapabilities_ReplaceDataCapability, capabilities.replace_data, &now, &now); - self.set_variable_value(HistoryServerCapabilities_UpdateDataCapability, capabilities.update_data, &now, &now); - self.set_variable_value(HistoryServerCapabilities_DeleteRawCapability, capabilities.delete_raw, &now, &now); - self.set_variable_value(HistoryServerCapabilities_DeleteAtTimeCapability, capabilities.delete_at_time, &now, &now); - self.set_variable_value(HistoryServerCapabilities_InsertEventCapability, capabilities.insert_event, &now, &now); - self.set_variable_value(HistoryServerCapabilities_ReplaceEventCapability, capabilities.replace_event, &now, &now); - self.set_variable_value(HistoryServerCapabilities_UpdateEventCapability, capabilities.update_event, &now, &now); - self.set_variable_value(HistoryServerCapabilities_InsertAnnotationCapability, capabilities.insert_annotation, &now, &now); + self.set_variable_value( + HistoryServerCapabilities_AccessHistoryDataCapability, + capabilities.access_history_data, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_AccessHistoryEventsCapability, + capabilities.access_history_events, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_MaxReturnDataValues, + capabilities.max_return_data, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_MaxReturnEventValues, + capabilities.max_return_events, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_InsertDataCapability, + capabilities.insert_data, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_ReplaceDataCapability, + capabilities.replace_data, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_UpdateDataCapability, + capabilities.update_data, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_DeleteRawCapability, + capabilities.delete_raw, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_DeleteAtTimeCapability, + capabilities.delete_at_time, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_InsertEventCapability, + capabilities.insert_event, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_ReplaceEventCapability, + capabilities.replace_event, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_UpdateEventCapability, + capabilities.update_event, + &now, + &now, + ); + self.set_variable_value( + HistoryServerCapabilities_InsertAnnotationCapability, + capabilities.insert_annotation, + &now, + &now, + ); } /// Returns the root folder @@ -526,9 +769,15 @@ impl AddressSpace { /// Inserts a node into the address space node map and its references to other target nodes. 
/// The tuple of references is the target node id, reference type id and a bool which is false for /// a forward reference and indicating inverse - pub fn insert(&mut self, node: T, references: Option<&[(&NodeId, &S, ReferenceDirection)]>) -> bool - where T: Into, - S: Into + Clone { + pub fn insert( + &mut self, + node: T, + references: Option<&[(&NodeId, &S, ReferenceDirection)]>, + ) -> bool + where + T: Into, + S: Into + Clone, + { let node_type = node.into(); let node_id = node_type.node_id(); @@ -552,7 +801,8 @@ impl AddressSpace { pub fn add_default_nodes(&mut self) { debug!("populating address space"); - #[cfg(feature = "generated-address-space")] { + #[cfg(feature = "generated-address-space")] + { // Reserve space in the maps. The default node set contains just under 2000 values for // nodes, references and inverse references. self.node_map.reserve(2000); @@ -563,22 +813,36 @@ impl AddressSpace { // Inserts a bunch of references between two nodes into the address space pub fn insert_references(&mut self, references: &[(&NodeId, &NodeId, &T)]) - where T: Into + Clone + where + T: Into + Clone, { self.references.insert_references(references); self.update_last_modified(); } /// Inserts a single reference between two nodes in the address space - pub fn insert_reference(&mut self, node_id: &NodeId, target_node_id: &NodeId, reference_type_id: T) - where T: Into + Clone + pub fn insert_reference( + &mut self, + node_id: &NodeId, + target_node_id: &NodeId, + reference_type_id: T, + ) where + T: Into + Clone, { - self.references.insert_reference(node_id, target_node_id, &reference_type_id); + self.references + .insert_reference(node_id, target_node_id, &reference_type_id); self.update_last_modified(); } - pub fn set_node_type(&mut self, node_id: &NodeId, node_type: T) where T: Into { - self.insert_reference(node_id, &node_type.into(), ReferenceTypeId::HasTypeDefinition); + pub fn set_node_type(&mut self, node_id: &NodeId, node_type: T) + where + T: Into, + { + self.insert_reference( + node_id, + &node_type.into(), + ReferenceTypeId::HasTypeDefinition, + ); } pub fn node_exists(&self, node_id: &NodeId) -> bool { @@ -586,8 +850,16 @@ impl AddressSpace { } /// Adds a folder with a specified id - pub fn add_folder_with_id(&mut self, node_id: &NodeId, browse_name: R, display_name: S, parent_node_id: &NodeId) -> bool - where R: Into, S: Into + pub fn add_folder_with_id( + &mut self, + node_id: &NodeId, + browse_name: R, + display_name: S, + parent_node_id: &NodeId, + ) -> bool + where + R: Into, + S: Into, { self.assert_namespace(node_id); ObjectBuilder::new(node_id, browse_name, display_name) @@ -597,8 +869,15 @@ impl AddressSpace { } /// Adds a folder using a generated node id - pub fn add_folder(&mut self, browse_name: R, display_name: S, parent_node_id: &NodeId) -> Result - where R: Into, S: Into + pub fn add_folder( + &mut self, + browse_name: R, + display_name: S, + parent_node_id: &NodeId, + ) -> Result + where + R: Into, + S: Into, { let node_id = NodeId::next_numeric(self.default_namespace); self.assert_namespace(&node_id); @@ -610,12 +889,24 @@ impl AddressSpace { } /// Adds a list of variables to the specified parent node - pub fn add_variables(&mut self, variables: Vec, parent_node_id: &NodeId) -> Vec { - let result = variables.into_iter().map(|v| { - self.insert(v, Some(&[ - (&parent_node_id, &ReferenceTypeId::Organizes, ReferenceDirection::Inverse), - ])) - }).collect(); + pub fn add_variables( + &mut self, + variables: Vec, + parent_node_id: &NodeId, + ) -> Vec { + let result = 
variables + .into_iter() + .map(|v| { + self.insert( + v, + Some(&[( + &parent_node_id, + &ReferenceTypeId::Organizes, + ReferenceDirection::Inverse, + )]), + ) + }) + .collect(); self.update_last_modified(); result } @@ -642,17 +933,32 @@ impl AddressSpace { } /// Finds the matching reference and deletes it - pub fn delete_reference(&mut self, node_id: &NodeId, target_node_id: &NodeId, reference_type_id: T) -> bool where T: Into { - self.references.delete_reference(node_id, target_node_id, reference_type_id) + pub fn delete_reference( + &mut self, + node_id: &NodeId, + target_node_id: &NodeId, + reference_type_id: T, + ) -> bool + where + T: Into, + { + self.references + .delete_reference(node_id, target_node_id, reference_type_id) } /// Find node by something that can be turned into a node id and return a reference to it. - pub fn find(&self, node_id: N) -> Option<&NodeType> where N: Into { + pub fn find(&self, node_id: N) -> Option<&NodeType> + where + N: Into, + { self.find_node(&node_id.into()) } /// Find node by something that can be turned into a node id and return a mutable reference to it. - pub fn find_mut(&mut self, node_id: N) -> Option<&mut NodeType> where N: Into { + pub fn find_mut(&mut self, node_id: N) -> Option<&mut NodeType> + where + N: Into, + { self.find_node_mut(&node_id.into()) } @@ -668,7 +974,10 @@ impl AddressSpace { /// Find and return a variable with the specified node id or return None if it cannot be /// found or is not a variable - pub fn find_variable(&self, node_id: N) -> Option<&Variable> where N: Into { + pub fn find_variable(&self, node_id: N) -> Option<&Variable> + where + N: Into, + { self.find_variable_by_ref(&node_id.into()) } @@ -680,7 +989,10 @@ impl AddressSpace { /// Find and return a variable with the specified node id or return None if it cannot be /// found or is not a variable - pub fn find_variable_mut(&mut self, node_id: N) -> Option<&mut Variable> where N: Into { + pub fn find_variable_mut(&mut self, node_id: N) -> Option<&mut Variable> + where + N: Into, + { self.find_variable_mut_by_ref(&node_id.into()) } @@ -692,17 +1004,39 @@ impl AddressSpace { /// Set a variable value from its NodeId. The function will return false if the variable does /// not exist, or the node is not a variable. - pub fn set_variable_value(&mut self, node_id: N, value: V, source_timestamp: &DateTime, server_timestamp: &DateTime) -> bool - where N: Into, V: Into { + pub fn set_variable_value( + &mut self, + node_id: N, + value: V, + source_timestamp: &DateTime, + server_timestamp: &DateTime, + ) -> bool + where + N: Into, + V: Into, + { self.set_variable_value_by_ref(&node_id.into(), value, source_timestamp, server_timestamp) } /// Set a variable value from its NodeId. The function will return false if the variable does /// not exist, or the node is not a variable. 
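// Sketch of typical set_variable_value() usage, assuming the variable node already exists;
// the node id and value below are illustrative, and `address_space` is assumed in scope.
let now = DateTime::now();
let node_id = NodeId::new(2, "my_counter");
let updated = address_space.set_variable_value(node_id, 42u32, &now, &now);
debug_assert!(updated, "expected the variable node to exist");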
- pub fn set_variable_value_by_ref(&mut self, node_id: &NodeId, value: V, source_timestamp: &DateTime, server_timestamp: &DateTime) -> bool - where V: Into { + pub fn set_variable_value_by_ref( + &mut self, + node_id: &NodeId, + value: V, + source_timestamp: &DateTime, + server_timestamp: &DateTime, + ) -> bool + where + V: Into, + { if let Some(ref mut variable) = self.find_variable_mut_by_ref(node_id) { - let _ = variable.set_value_direct(value, StatusCode::Good, source_timestamp, server_timestamp); + let _ = variable.set_value_direct( + value, + StatusCode::Good, + source_timestamp, + server_timestamp, + ); true } else { false @@ -711,20 +1045,33 @@ impl AddressSpace { /// Gets a variable value with the supplied NodeId. The function will return Err if the /// NodeId does not exist or is not a variable. - pub fn get_variable_value(&self, node_id: N) -> Result where N: Into { + pub fn get_variable_value(&self, node_id: N) -> Result + where + N: Into, + { self.find_variable(node_id) - .map(|variable| variable.value(TimestampsToReturn::Neither, NumericRange::None, &QualifiedName::null(), 0.0)) + .map(|variable| { + variable.value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0, + ) + }) .ok_or_else(|| ()) } /// Registers a method callback on the specified object id and method id - pub fn register_method_handler(&mut self, method_id: N, handler: MethodCallback) where N: Into { + pub fn register_method_handler(&mut self, method_id: N, handler: MethodCallback) + where + N: Into, + { // Check the object id and method id actually exist as things in the address space let method_id = method_id.into(); if let Some(method) = self.find_mut(&method_id) { match method { NodeType::Method(method) => method.set_callback(handler), - _ => panic!("{} is not a method node", method_id) + _ => panic!("{} is not a method node", method_id), } } else { panic!("{} method id does not exist", method_id); @@ -734,7 +1081,11 @@ impl AddressSpace { /// Test if the type definition is defined and valid for a class of the specified type. /// i.e. if we have a Variable or Object class that the type is a VariableType or ObjectType /// respectively. - pub fn is_valid_type_definition(&self, node_class: NodeClass, type_definition: &NodeId) -> bool { + pub fn is_valid_type_definition( + &self, + node_class: NodeClass, + type_definition: &NodeId, + ) -> bool { match node_class { NodeClass::Object => { if type_definition.is_null() { @@ -771,10 +1122,17 @@ impl AddressSpace { } /// Test if a reference relationship exists between one node and another node - pub fn has_reference(&self, source_node: &NodeId, target_node: &NodeId, reference_type: T) -> bool - where T: Into + pub fn has_reference( + &self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: T, + ) -> bool + where + T: Into, { - self.references.has_reference(source_node, target_node, reference_type) + self.references + .has_reference(source_node, target_node, reference_type) } /// Tests if a method exists on a specific object. This will be true if the method id is @@ -795,23 +1153,37 @@ impl AddressSpace { /// /// Calls require a registered handler to handle the method. If there is no handler, or if /// the request refers to a non existent object / method, the function will return an error. 
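// Sketch: a handler has to be registered before call_method() can dispatch to it. This mirrors
// the built-in registrations elsewhere in this file; reusing ServerGetMonitoredItemsMethod here
// is purely illustrative, and `address_space` is assumed in scope.
use crate::address_space::method_impls;
address_space.register_method_handler(
    MethodId::Server_GetMonitoredItems,
    Box::new(method_impls::ServerGetMonitoredItemsMethod),
);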
- pub fn call_method(&mut self, _server_state: &ServerState, session: &mut Session, request: &CallMethodRequest) -> Result { + pub fn call_method( + &mut self, + _server_state: &ServerState, + session: &mut Session, + request: &CallMethodRequest, + ) -> Result { let (object_id, method_id) = (&request.object_id, &request.method_id); // Handle the call if !is_object!(self, object_id) { - error!("Method call to {:?} on {:?} but the node id is not recognized!", method_id, object_id); + error!( + "Method call to {:?} on {:?} but the node id is not recognized!", + method_id, object_id + ); Err(StatusCode::BadNodeIdUnknown) } else if !is_method!(self, method_id) { - error!("Method call to {:?} on {:?} but the method id is not recognized!", method_id, object_id); + error!( + "Method call to {:?} on {:?} but the method id is not recognized!", + method_id, object_id + ); Err(StatusCode::BadMethodInvalid) } else if !self.method_exists_on_object(object_id, method_id) { - error!("Method call to {:?} on {:?} but the method does not exist on the object!", method_id, object_id); + error!( + "Method call to {:?} on {:?} but the method does not exist on the object!", + method_id, object_id + ); Err(StatusCode::BadMethodInvalid) } else if let Some(method) = self.find_mut(method_id) { // TODO check security - session / user may not have permission to call methods match method { NodeType::Method(method) => method.call(session, request), - _ => Err(StatusCode::BadMethodInvalid) + _ => Err(StatusCode::BadMethodInvalid), } } else { Err(StatusCode::BadMethodInvalid) @@ -823,31 +1195,52 @@ impl AddressSpace { pub fn is_subtype(&self, subtype_id: &NodeId, base_type_id: &NodeId) -> bool { subtype_id == base_type_id || { // Apply same test to all children of the base type - if let Some(references) = self.find_references(base_type_id, Some((ReferenceTypeId::HasSubtype, false))) { + if let Some(references) = + self.find_references(base_type_id, Some((ReferenceTypeId::HasSubtype, false))) + { // Each child will test if it is the parent / match for the subtype - references.iter().find(|r| self.is_subtype(subtype_id, &r.target_node)).is_some() + references + .iter() + .find(|r| self.is_subtype(subtype_id, &r.target_node)) + .is_some() } else { false } } } /// Finds objects by a specified type. 
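// Sketch: callers normally go through the public wrappers below (find_objects_by_type /
// find_variables_by_type), e.g. collecting every object whose type definition is, or derives
// from, FolderType. The `address_space` binding is assumed.
if let Some(object_ids) = address_space.find_objects_by_type(ObjectTypeId::FolderType, true) {
    debug!("found {} folder objects", object_ids.len());
}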
- fn find_nodes_by_type(&self, node_type_class: NodeClass, node_type_id: T, include_subtypes: bool) -> Option> where T: Into { + fn find_nodes_by_type( + &self, + node_type_class: NodeClass, + node_type_id: T, + include_subtypes: bool, + ) -> Option> + where + T: Into, + { let node_type_id = node_type_id.into(); // Ensure the node type is of the right class if let Some(node) = self.node_map.get(&node_type_id) { if node.node_class() == node_type_class { // Find nodes with a matching type definition - let nodes = self.node_map.iter() + let nodes = self + .node_map + .iter() .filter(|(_, v)| v.node_class() == NodeClass::Object) .filter(move |(k, _)| { // Node has to have a type definition reference to the type - if let Some(type_refs) = self.find_references(k, Some((ReferenceTypeId::HasTypeDefinition, false))) { + if let Some(type_refs) = self + .find_references(k, Some((ReferenceTypeId::HasTypeDefinition, false))) + { // Type definition must find the sought after type - type_refs.iter().find(|r| { - include_subtypes && self.is_subtype(&node_type_id, &r.target_node) || - r.target_node == node_type_id - }).is_some() + type_refs + .iter() + .find(|r| { + include_subtypes + && self.is_subtype(&node_type_id, &r.target_node) + || r.target_node == node_type_id + }) + .is_some() } else { false } @@ -864,16 +1257,33 @@ impl AddressSpace { None } } else { - debug!("Cannot find nodes by type because node type id {:?} does not exist", node_type_id); + debug!( + "Cannot find nodes by type because node type id {:?} does not exist", + node_type_id + ); None } } - pub fn find_objects_by_type(&self, object_type: T, include_subtypes: bool) -> Option> where T: Into { + pub fn find_objects_by_type( + &self, + object_type: T, + include_subtypes: bool, + ) -> Option> + where + T: Into, + { self.find_nodes_by_type(NodeClass::ObjectType, object_type, include_subtypes) } - pub fn find_variables_by_type(&self, variable_type: T, include_subtypes: bool) -> Option> where T: Into { + pub fn find_variables_by_type( + &self, + variable_type: T, + include_subtypes: bool, + ) -> Option> + where + T: Into, + { self.find_nodes_by_type(NodeClass::VariableType, variable_type, include_subtypes) } @@ -881,43 +1291,74 @@ impl AddressSpace { pub fn find_aggregates_of(&self, parent_node: &NodeId) -> Option> { self.find_references(parent_node, Some((ReferenceTypeId::Aggregates, true))) .map(|references| { - references.iter().map(|r| { - // debug!("reference {:?}", r); - r.target_node.clone() - }).collect() + references + .iter() + .map(|r| { + // debug!("reference {:?}", r); + r.target_node.clone() + }) + .collect() }) } /// Finds hierarchical references of the parent node, i.e. children, event sources, organizes etc from the parent node to other nodes. /// This function will return node ids even if the nodes themselves do not exist in the address space. pub fn find_hierarchical_references(&self, parent_node: &NodeId) -> Option> { - self.find_references(parent_node, Some((ReferenceTypeId::HierarchicalReferences, true))) - .map(|references| { - references.iter().map(|r| { + self.find_references( + parent_node, + Some((ReferenceTypeId::HierarchicalReferences, true)), + ) + .map(|references| { + references + .iter() + .map(|r| { // debug!("reference {:?}", r); r.target_node.clone() - }).collect() - }) + }) + .collect() + }) } /// Finds forward references from the specified node. The reference filter can optionally filter results /// by a specific type and subtypes. 
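// Sketch: listing forward Organizes references from an assumed `parent_node_id`; the bool in the
// filter tuple controls whether subtypes of the reference type also match.
if let Some(references) =
    address_space.find_references(&parent_node_id, Some((ReferenceTypeId::Organizes, false)))
{
    for reference in &references {
        debug!("{} organizes {}", parent_node_id, reference.target_node);
    }
}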
- pub fn find_references(&self, node: &NodeId, reference_filter: Option<(T, bool)>) -> Option> where T: Into + Clone { + pub fn find_references( + &self, + node: &NodeId, + reference_filter: Option<(T, bool)>, + ) -> Option> + where + T: Into + Clone, + { self.references.find_references(node, reference_filter) } /// Finds inverse references, it those that point to the specified node. The reference filter can /// optionally filter results by a specific type and subtypes. - pub fn find_inverse_references(&self, node: &NodeId, reference_filter: Option<(T, bool)>) -> Option> where T: Into + Clone { - self.references.find_inverse_references(node, reference_filter) + pub fn find_inverse_references( + &self, + node: &NodeId, + reference_filter: Option<(T, bool)>, + ) -> Option> + where + T: Into + Clone, + { + self.references + .find_inverse_references(node, reference_filter) } /// Finds references for optionally forwards, inverse or both and return the references. The usize /// represents the index in the collection where the inverse references start (if applicable) - pub fn find_references_by_direction(&self, node_id: &NodeId, browse_direction: BrowseDirection, reference_filter: Option<(T, bool)>) -> (Vec, usize) - where T: Into + Clone + pub fn find_references_by_direction( + &self, + node_id: &NodeId, + browse_direction: BrowseDirection, + reference_filter: Option<(T, bool)>, + ) -> (Vec, usize) + where + T: Into + Clone, { - self.references.find_references_by_direction(node_id, browse_direction, reference_filter) + self.references + .find_references_by_direction(node_id, browse_direction, reference_filter) } /// Updates the last modified timestamp to now @@ -926,9 +1367,19 @@ impl AddressSpace { } /// Sets the getter for a variable node - fn set_variable_getter(&mut self, variable_id: N, getter: F) where + fn set_variable_getter(&mut self, variable_id: N, getter: F) + where N: Into, - F: FnMut(&NodeId, TimestampsToReturn, AttributeId, NumericRange, &QualifiedName, f64) -> Result, StatusCode> + Send + 'static + F: FnMut( + &NodeId, + TimestampsToReturn, + AttributeId, + NumericRange, + &QualifiedName, + f64, + ) -> Result, StatusCode> + + Send + + 'static, { if let Some(ref mut v) = self.find_variable_mut(variable_id) { let getter = AttrFnGetter::new(getter); diff --git a/server/src/address_space/base.rs b/server/src/address_space/base.rs index f587098ac..5216c4f5b 100644 --- a/server/src/address_space/base.rs +++ b/server/src/address_space/base.rs @@ -2,10 +2,7 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use opcua_types::{ - *, - status_code::StatusCode, -}; +use opcua_types::{status_code::StatusCode, *}; use super::node::{Node, NodeBase}; @@ -58,7 +55,8 @@ impl NodeBase for Base { } fn write_mask(&self) -> Option { - self.write_mask.map(|write_mask| WriteMask::from_bits_truncate(write_mask)) + self.write_mask + .map(|write_mask| WriteMask::from_bits_truncate(write_mask)) } fn set_write_mask(&mut self, write_mask: WriteMask) { @@ -66,7 +64,8 @@ impl NodeBase for Base { } fn user_write_mask(&self) -> Option { - self.user_write_mask.map(|user_write_mask| WriteMask::from_bits_truncate(user_write_mask)) + self.user_write_mask + .map(|user_write_mask| WriteMask::from_bits_truncate(user_write_mask)) } fn set_user_write_mask(&mut self, user_write_mask: WriteMask) { @@ -75,7 +74,14 @@ impl NodeBase for Base { } impl Node for Base { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, _index_range: NumericRange, 
_data_encoding: &QualifiedName, _max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + _index_range: NumericRange, + _data_encoding: &QualifiedName, + _max_age: f64, + ) -> Option { match attribute_id { AttributeId::NodeClass => Some((self.node_class as i32).into()), AttributeId::NodeId => Some(self.node_id().into()), @@ -84,13 +90,17 @@ impl Node for Base { AttributeId::Description => self.description().map(|description| description.into()), AttributeId::WriteMask => self.write_mask.map(|v| v.into()), AttributeId::UserWriteMask => self.user_write_mask.map(|v| v.into()), - _ => None + _ => None, } } /// Tries to set the attribute if its one of the common attribute, otherwise it returns the value /// for the subclass to handle. - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::NodeClass => { if let Variant::Int32(v) = value { @@ -103,7 +113,9 @@ impl Node for Base { 32 => NodeClass::ReferenceType, 64 => NodeClass::DataType, 128 => NodeClass::View, - _ => { return Ok(()); } + _ => { + return Ok(()); + } }; Ok(()) } else { @@ -158,17 +170,21 @@ impl Node for Base { Err(StatusCode::BadTypeMismatch) } } - _ => { - Err(StatusCode::BadAttributeIdInvalid) - } + _ => Err(StatusCode::BadAttributeIdInvalid), } } } impl Base { - pub fn new(node_class: NodeClass, node_id: &NodeId, browse_name: R, display_name: S) -> Base - where R: Into, - S: Into, + pub fn new( + node_class: NodeClass, + node_id: &NodeId, + browse_name: R, + display_name: S, + ) -> Base + where + R: Into, + S: Into, { Base { node_id: node_id.clone(), @@ -190,7 +206,10 @@ impl Base { self.node_id = node_id; } - pub fn set_browse_name(&mut self, browse_name: S) where S: Into { + pub fn set_browse_name(&mut self, browse_name: S) + where + S: Into, + { self.browse_name = browse_name.into(); } } diff --git a/server/src/address_space/data_type.rs b/server/src/address_space/data_type.rs index b5e4b51bc..2ad6f5daf 100644 --- a/server/src/address_space/data_type.rs +++ b/server/src/address_space/data_type.rs @@ -29,14 +29,31 @@ impl Default for DataType { node_base_impl!(DataType); impl Node for DataType { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::IsAbstract => Some(self.is_abstract().into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::IsAbstract => { if let Variant::Boolean(v) = value { @@ -46,15 +63,21 @@ impl Node for DataType { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => 
self.base.set_attribute(attribute_id, value), } } } impl DataType { - pub fn new(node_id: &NodeId, browse_name: R, display_name: S, is_abstract: bool) -> DataType - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + is_abstract: bool, + ) -> DataType + where + R: Into, + S: Into, { DataType { base: Base::new(NodeClass::DataType, node_id, browse_name, display_name), @@ -62,12 +85,22 @@ impl DataType { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: DataTypeAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: DataTypeAttributes, + ) -> Result + where + S: Into, { let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(AttributesMask::DISPLAY_NAME | AttributesMask::IS_ABSTRACT) { - let mut node = Self::new(node_id, browse_name, attributes.display_name, attributes.is_abstract); + let mut node = Self::new( + node_id, + browse_name, + attributes.display_name, + attributes.is_abstract, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } diff --git a/server/src/address_space/method.rs b/server/src/address_space/method.rs index 7d3990f8e..eee2093c8 100644 --- a/server/src/address_space/method.rs +++ b/server/src/address_space/method.rs @@ -41,12 +41,23 @@ impl MethodBuilder { } fn args_to_variant(arguments: &[Argument]) -> Vec { - arguments.iter().map(|arg| { - Variant::from(ExtensionObject::from_encodable(ObjectId::Argument_Encoding_DefaultBinary, arg)) - }).collect::>() - } - - fn insert_args(&self, args_name: &str, address_space: &mut AddressSpace, arguments: &[Argument]) { + arguments + .iter() + .map(|arg| { + Variant::from(ExtensionObject::from_encodable( + ObjectId::Argument_Encoding_DefaultBinary, + arg, + )) + }) + .collect::>() + } + + fn insert_args( + &self, + args_name: &str, + address_space: &mut AddressSpace, + arguments: &[Argument], + ) { let fn_node_id = self.node.node_id(); let args_id = NodeId::next_numeric(fn_node_id.namespace); let args_value = Self::args_to_variant(arguments); @@ -86,15 +97,32 @@ impl Default for Method { node_base_impl!(Method); impl Node for Method { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::Executable => Some(self.executable().into()), AttributeId::UserExecutable => Some(self.user_executable().into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::Executable => { if let Variant::Boolean(v) = value { @@ -112,15 +140,22 @@ impl Node for Method { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => self.base.set_attribute(attribute_id, value), } } } impl Method { - pub fn 
new(node_id: &NodeId, browse_name: R, display_name: S, executable: bool, user_executable: bool) -> Method - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + executable: bool, + user_executable: bool, + ) -> Method + where + R: Into, + S: Into, { Method { base: Base::new(NodeClass::Method, node_id, browse_name, display_name), @@ -130,13 +165,26 @@ impl Method { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: MethodAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: MethodAttributes, + ) -> Result + where + S: Into, { - let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::EXECUTABLE | AttributesMask::USER_EXECUTABLE; + let mandatory_attributes = AttributesMask::DISPLAY_NAME + | AttributesMask::EXECUTABLE + | AttributesMask::USER_EXECUTABLE; let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(mandatory_attributes) { - let mut node = Self::new(node_id, browse_name, attributes.display_name, attributes.executable, attributes.user_executable); + let mut node = Self::new( + node_id, + browse_name, + attributes.display_name, + attributes.executable, + attributes.user_executable, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } @@ -184,12 +232,19 @@ impl Method { self.callback.is_some() } - pub fn call(&mut self, session: &mut Session, request: &CallMethodRequest) -> Result { + pub fn call( + &mut self, + session: &mut Session, + request: &CallMethodRequest, + ) -> Result { if let Some(ref mut callback) = self.callback { // Call the handler callback.call(session, request) } else { - error!("Method call to {} has no handler, treating as invalid", self.node_id()); + error!( + "Method call to {} has no handler, treating as invalid", + self.node_id() + ); Err(StatusCode::BadMethodInvalid) } } diff --git a/server/src/address_space/method_impls.rs b/server/src/address_space/method_impls.rs index 84eb50dbb..88db722ff 100644 --- a/server/src/address_space/method_impls.rs +++ b/server/src/address_space/method_impls.rs @@ -2,17 +2,17 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use opcua_types::*; use opcua_types::service_types::{CallMethodRequest, CallMethodResult}; use opcua_types::status_code::StatusCode; +use opcua_types::*; -use crate::{ - callbacks::Method, - session::Session, -}; +use crate::{callbacks::Method, session::Session}; /// Count the number of provided input arguments, comparing them to the expected number. -fn ensure_input_argument_count(request: &CallMethodRequest, expected: usize) -> Result<(), StatusCode> { +fn ensure_input_argument_count( + request: &CallMethodRequest, + expected: usize, +) -> Result<(), StatusCode> { if let Some(ref input_arguments) = request.input_arguments { let actual = input_arguments.len(); if actual == expected { @@ -34,26 +34,27 @@ fn ensure_input_argument_count(request: &CallMethodRequest, expected: usize) -> /// Gets the input argument value, expecting it to the specified variant type. If it fails, /// it returns an error macro_rules! 
get_input_argument { - ( $request:expr, $index: expr, $variant_type: ident ) => { - { - let input_arguments = $request.input_arguments.as_ref().unwrap(); - let arg = input_arguments.get($index).unwrap(); - if let Variant::$variant_type(value) = arg { - Ok(value) - } - else { - // Argument is not the expected type - Err(StatusCode::BadInvalidArgument) - } + ( $request:expr, $index: expr, $variant_type: ident ) => {{ + let input_arguments = $request.input_arguments.as_ref().unwrap(); + let arg = input_arguments.get($index).unwrap(); + if let Variant::$variant_type(value) = arg { + Ok(value) + } else { + // Argument is not the expected type + Err(StatusCode::BadInvalidArgument) } - } + }}; } /// This is the handler for Server.ResendData method call. pub struct ServerResendDataMethod; impl Method for ServerResendDataMethod { - fn call(&mut self, session: &mut Session, request: &CallMethodRequest) -> Result { + fn call( + &mut self, + session: &mut Session, + request: &CallMethodRequest, + ) -> Result { debug!("Method handler for ResendData"); // OPC UA part 5 - ResendData([in] UInt32 subscriptionId); @@ -89,7 +90,11 @@ impl Method for ServerResendDataMethod { pub struct ServerGetMonitoredItemsMethod; impl Method for ServerGetMonitoredItemsMethod { - fn call(&mut self, session: &mut Session, request: &CallMethodRequest) -> Result { + fn call( + &mut self, + session: &mut Session, + request: &CallMethodRequest, + ) -> Result { debug!("Method handler for GetMonitoredItems"); // OPC UA part 5 - GetMonitoredItems([in] UInt32 subscriptionId, [out] UInt32[] serverHandles, [out] UInt32[] clientHandles); @@ -107,7 +112,11 @@ impl Method for ServerGetMonitoredItemsMethod { let subscription_id = get_input_argument!(request, 0, UInt32)?; - if let Some(subscription) = session.subscriptions().subscriptions().get(&subscription_id) { + if let Some(subscription) = session + .subscriptions() + .subscriptions() + .get(&subscription_id) + { // Response // serverHandles: Vec // clientHandles: Vec @@ -124,4 +133,4 @@ impl Method for ServerGetMonitoredItemsMethod { Err(StatusCode::BadSubscriptionIdInvalid) } } -} \ No newline at end of file +} diff --git a/server/src/address_space/mod.rs b/server/src/address_space/mod.rs index a85a3bc16..889c11090 100644 --- a/server/src/address_space/mod.rs +++ b/server/src/address_space/mod.rs @@ -10,26 +10,78 @@ use std::{ sync::{Arc, Mutex}, }; -use opcua_types::{AttributeId, DataValue, NodeId, NumericRange, QualifiedName, TimestampsToReturn}; use opcua_types::status_code::StatusCode; +use opcua_types::{ + AttributeId, DataValue, NodeId, NumericRange, QualifiedName, TimestampsToReturn, +}; use crate::callbacks::{AttributeGetter, AttributeSetter}; pub use self::address_space::AddressSpace; /// An implementation of attribute getter that can be easily constructed from a mutable function -pub struct AttrFnGetter where F: FnMut(&NodeId, TimestampsToReturn, AttributeId, NumericRange, &QualifiedName, f64) -> Result, StatusCode> + Send { - getter: F +pub struct AttrFnGetter +where + F: FnMut( + &NodeId, + TimestampsToReturn, + AttributeId, + NumericRange, + &QualifiedName, + f64, + ) -> Result, StatusCode> + + Send, +{ + getter: F, } -impl AttributeGetter for AttrFnGetter where F: FnMut(&NodeId, TimestampsToReturn, AttributeId, NumericRange, &QualifiedName, f64) -> Result, StatusCode> + Send { - fn get(&mut self, node_id: &NodeId, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Result, 
StatusCode> { - (self.getter)(node_id, timestamps_to_return, attribute_id, index_range, data_encoding, max_age) +impl AttributeGetter for AttrFnGetter +where + F: FnMut( + &NodeId, + TimestampsToReturn, + AttributeId, + NumericRange, + &QualifiedName, + f64, + ) -> Result, StatusCode> + + Send, +{ + fn get( + &mut self, + node_id: &NodeId, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Result, StatusCode> { + (self.getter)( + node_id, + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ) } } -impl AttrFnGetter where F: FnMut(&NodeId, TimestampsToReturn, AttributeId, NumericRange, &QualifiedName, f64) -> Result, StatusCode> + Send { - pub fn new(getter: F) -> AttrFnGetter { AttrFnGetter { getter } } +impl AttrFnGetter +where + F: FnMut( + &NodeId, + TimestampsToReturn, + AttributeId, + NumericRange, + &QualifiedName, + f64, + ) -> Result, StatusCode> + + Send, +{ + pub fn new(getter: F) -> AttrFnGetter { + AttrFnGetter { getter } + } pub fn new_boxed(getter: F) -> Arc>> { Arc::new(Mutex::new(Self::new(getter))) @@ -37,18 +89,35 @@ impl AttrFnGetter where F: FnMut(&NodeId, TimestampsToReturn, AttributeId, } /// An implementation of attribute setter that can be easily constructed using a mutable function -pub struct AttrFnSetter where F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send { - setter: F +pub struct AttrFnSetter +where + F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send, +{ + setter: F, } -impl AttributeSetter for AttrFnSetter where F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send { - fn set(&mut self, node_id: &NodeId, attribute_id: AttributeId, index_range: NumericRange, data_value: DataValue) -> Result<(), StatusCode> { +impl AttributeSetter for AttrFnSetter +where + F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send, +{ + fn set( + &mut self, + node_id: &NodeId, + attribute_id: AttributeId, + index_range: NumericRange, + data_value: DataValue, + ) -> Result<(), StatusCode> { (self.setter)(node_id, attribute_id, index_range, data_value) } } -impl AttrFnSetter where F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send { - pub fn new(setter: F) -> AttrFnSetter { AttrFnSetter { setter } } +impl AttrFnSetter +where + F: FnMut(&NodeId, AttributeId, NumericRange, DataValue) -> Result<(), StatusCode> + Send, +{ + pub fn new(setter: F) -> AttrFnSetter { + AttrFnSetter { setter } + } pub fn new_boxed(setter: F) -> Arc>> { Arc::new(Mutex::new(Self::new(setter))) @@ -59,10 +128,7 @@ impl AttrFnSetter where F: FnMut(&NodeId, AttributeId, NumericRange, DataV // variables etc. macro_rules! node_builder_impl { ( $node_builder_ty:ident, $node_ty:ident ) => { - use $crate::address_space::{ - address_space::{AddressSpace}, - references::ReferenceDirection, - }; + use $crate::address_space::{address_space::AddressSpace, references::ReferenceDirection}; /// A builder for constructing a node of same name. This can be used as an easy way /// to create a node and the references it has to another node in a simple fashion. @@ -74,17 +140,18 @@ macro_rules! node_builder_impl { impl $node_builder_ty { /// Creates a builder for a node. 
All nodes are required to su pub fn new(node_id: &NodeId, browse_name: T, display_name: S) -> Self - where T: Into, - S: Into, + where + T: Into, + S: Into, { trace!("Creating a node using a builder, node id {}", node_id); Self { node: $node_ty::default(), references: Vec::with_capacity(10), } - .node_id(node_id.clone()) - .browse_name(browse_name) - .display_name(display_name) + .node_id(node_id.clone()) + .browse_name(browse_name) + .display_name(display_name) } pub fn get_node_id(&self) -> NodeId { @@ -96,12 +163,18 @@ macro_rules! node_builder_impl { self } - fn browse_name(mut self, browse_name: V) -> Self where V: Into { + fn browse_name(mut self, browse_name: V) -> Self + where + V: Into, + { let _ = self.node.base.set_browse_name(browse_name); self } - fn display_name(mut self, display_name: V) -> Self where V: Into { + fn display_name(mut self, display_name: V) -> Self + where + V: Into, + { self.node.set_display_name(display_name.into()); self } @@ -112,27 +185,54 @@ macro_rules! node_builder_impl { } /// Sets the description of the node - pub fn description(mut self, description: V) -> Self where V: Into{ + pub fn description(mut self, description: V) -> Self + where + V: Into, + { self.node.set_description(description.into()); self } /// Adds a reference to the node - pub fn reference(mut self, node_id: T, reference_type_id: ReferenceTypeId, reference_direction: ReferenceDirection) -> Self - where T: Into + pub fn reference( + mut self, + node_id: T, + reference_type_id: ReferenceTypeId, + reference_direction: ReferenceDirection, + ) -> Self + where + T: Into, { - self.references.push((node_id.into(), reference_type_id.into(), reference_direction)); + self.references.push(( + node_id.into(), + reference_type_id.into(), + reference_direction, + )); self } /// Indicates this node organizes another node by its id. - pub fn organizes(self, organizes_id: T) -> Self where T: Into { - self.reference(organizes_id, ReferenceTypeId::Organizes, ReferenceDirection::Forward) + pub fn organizes(self, organizes_id: T) -> Self + where + T: Into, + { + self.reference( + organizes_id, + ReferenceTypeId::Organizes, + ReferenceDirection::Forward, + ) } /// Indicates this node is organised by another node by its id - pub fn organized_by(self, organized_by_id: T) -> Self where T: Into { - self.reference(organized_by_id, ReferenceTypeId::Organizes, ReferenceDirection::Inverse) + pub fn organized_by(self, organized_by_id: T) -> Self + where + T: Into, + { + self.reference( + organized_by_id, + ReferenceTypeId::Organizes, + ReferenceDirection::Inverse, + ) } /// Yields a built node. This function will panic if the node is invalid. Note that @@ -142,7 +242,10 @@ macro_rules! node_builder_impl { if self.is_valid() { self.node } else { - panic!("The node is not valid, node id = {:?}", self.node.base.node_id()); + panic!( + "The node is not valid, node id = {:?}", + self.node.base.node_id() + ); } } @@ -151,84 +254,140 @@ macro_rules! 
node_builder_impl { pub fn insert(self, address_space: &mut AddressSpace) -> bool { if self.is_valid() { if !self.references.is_empty() { - let references = self.references.iter().map(|v| { - (&v.0, &v.1, v.2) - }).collect::>(); + let references = self + .references + .iter() + .map(|v| (&v.0, &v.1, v.2)) + .collect::>(); address_space.insert(self.node, Some(references.as_slice())) } else { address_space.insert::<$node_ty, ReferenceTypeId>(self.node, None) } } else { - panic!("The node is not valid, node id = {:?}", self.node.base.node_id()); + panic!( + "The node is not valid, node id = {:?}", + self.node.base.node_id() + ); } } } - } + }; } macro_rules! node_builder_impl_generates_event { ( $node_builder_ty:ident ) => { impl $node_builder_ty { - pub fn generates_event(self, event_type: T) -> Self where T: Into { - self.reference(event_type, ReferenceTypeId::GeneratesEvent, ReferenceDirection::Forward) + pub fn generates_event(self, event_type: T) -> Self + where + T: Into, + { + self.reference( + event_type, + ReferenceTypeId::GeneratesEvent, + ReferenceDirection::Forward, + ) } } - } + }; } macro_rules! node_builder_impl_subtype { ( $node_builder_ty:ident ) => { impl $node_builder_ty { - pub fn subtype_of(self, type_id: T) -> Self where T: Into { - self.reference(type_id, ReferenceTypeId::HasSubtype, ReferenceDirection::Inverse) + pub fn subtype_of(self, type_id: T) -> Self + where + T: Into, + { + self.reference( + type_id, + ReferenceTypeId::HasSubtype, + ReferenceDirection::Inverse, + ) } - pub fn has_subtype(self, subtype_id: T) -> Self where T: Into { - self.reference(subtype_id, ReferenceTypeId::HasSubtype, ReferenceDirection::Forward) + pub fn has_subtype(self, subtype_id: T) -> Self + where + T: Into, + { + self.reference( + subtype_id, + ReferenceTypeId::HasSubtype, + ReferenceDirection::Forward, + ) } } - } + }; } macro_rules! node_builder_impl_component_of { ( $node_builder_ty:ident ) => { impl $node_builder_ty { - pub fn component_of(self, component_of_id: T) -> Self where T: Into { - self.reference(component_of_id, ReferenceTypeId::HasComponent, ReferenceDirection::Inverse) + pub fn component_of(self, component_of_id: T) -> Self + where + T: Into, + { + self.reference( + component_of_id, + ReferenceTypeId::HasComponent, + ReferenceDirection::Inverse, + ) } - pub fn has_component(self, has_component_id: T) -> Self where T: Into { - self.reference(has_component_id, ReferenceTypeId::HasComponent, ReferenceDirection::Forward) + pub fn has_component(self, has_component_id: T) -> Self + where + T: Into, + { + self.reference( + has_component_id, + ReferenceTypeId::HasComponent, + ReferenceDirection::Forward, + ) } } - } + }; } macro_rules! 
node_builder_impl_property_of { ( $node_builder_ty:ident ) => { impl $node_builder_ty { - pub fn has_property(self, has_component_id: T) -> Self where T: Into { - self.reference(has_component_id, ReferenceTypeId::HasProperty, ReferenceDirection::Forward) + pub fn has_property(self, has_component_id: T) -> Self + where + T: Into, + { + self.reference( + has_component_id, + ReferenceTypeId::HasProperty, + ReferenceDirection::Forward, + ) } - pub fn property_of(self, component_of_id: T) -> Self where T: Into { - self.reference(component_of_id, ReferenceTypeId::HasProperty, ReferenceDirection::Inverse) + pub fn property_of(self, component_of_id: T) -> Self + where + T: Into, + { + self.reference( + component_of_id, + ReferenceTypeId::HasProperty, + ReferenceDirection::Inverse, + ) } } - } + }; } /// This is a sanity saving macro that implements the NodeBase trait for nodes. It assumes the /// node has a base: Base macro_rules! node_base_impl { ( $node_struct:ident ) => { - use opcua_types::*; - use opcua_types::status_code::StatusCode; - use opcua_types::service_types::NodeClass; use crate::address_space::node::NodeType; + use opcua_types::service_types::NodeClass; + use opcua_types::status_code::StatusCode; + use opcua_types::*; impl Into for $node_struct { - fn into(self) -> NodeType { NodeType::$node_struct(Box::new(self)) } + fn into(self) -> NodeType { + NodeType::$node_struct(Box::new(self)) + } } impl NodeBase for $node_struct { @@ -276,22 +435,22 @@ macro_rules! node_base_impl { self.base.set_user_write_mask(user_write_mask) } } - } + }; } pub mod address_space; pub mod base; -pub mod relative_path; -pub mod object; -pub mod variable; +pub mod data_type; pub mod method; pub mod node; -pub mod reference_type; +pub mod object; pub mod object_type; +pub mod reference_type; +pub mod references; +pub mod relative_path; +pub mod variable; pub mod variable_type; -pub mod data_type; pub mod view; -pub mod references; #[cfg(feature = "generated-address-space")] mod generated; @@ -332,7 +491,6 @@ bitflags! { } pub mod types { - pub use super::{AttrFnGetter, AttrFnSetter}; pub use super::address_space::AddressSpace; pub use super::data_type::{DataType, DataTypeBuilder}; pub use super::method::{Method, MethodBuilder}; @@ -344,5 +502,5 @@ pub mod types { pub use super::variable::{Variable, VariableBuilder}; pub use super::variable_type::{VariableType, VariableTypeBuilder}; pub use super::view::{View, ViewBuilder}; + pub use super::{AttrFnGetter, AttrFnSetter}; } - diff --git a/server/src/address_space/node.rs b/server/src/address_space/node.rs index 6a2f1b932..aff638c37 100644 --- a/server/src/address_space/node.rs +++ b/server/src/address_space/node.rs @@ -3,12 +3,12 @@ // Copyright (C) 2017-2020 Adam Lock use opcua_types::{ - AttributeId, DataValue, LocalizedText, NodeId, NumericRange, QualifiedName, service_types::NodeClass, status_code::StatusCode, TimestampsToReturn, - Variant, WriteMask, + service_types::NodeClass, status_code::StatusCode, AttributeId, DataValue, LocalizedText, + NodeId, NumericRange, QualifiedName, TimestampsToReturn, Variant, WriteMask, }; -use crate::{ - address_space::types::{Object, ObjectType, ReferenceType, Variable, VariableType, View, DataType, Method}, +use crate::address_space::types::{ + DataType, Method, Object, ObjectType, ReferenceType, Variable, VariableType, View, }; /// A `NodeType` is an enumeration holding every kind of node which can be hosted within the `AddressSpace`. 
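// Illustrative sketch (not part of the patch): constructing the closure-based attribute
// getter/setter wrappers reformatted above. Assumes the usual `use opcua_server::prelude::*;`
// glob (or `use opcua_types::*;` plus the address_space types) is in scope; the value
// returned by the getter is arbitrary.
fn _attr_fn_sketch() {
    // The six closure arguments mirror the `AttributeGetter::get` signature in this module;
    // the wrapper yields an Arc<Mutex<..>> handle ready to hand to a node.
    let getter = AttrFnGetter::new_boxed(
        |_node_id: &NodeId,
         _timestamps: TimestampsToReturn,
         _attribute_id: AttributeId,
         _index_range: NumericRange,
         _data_encoding: &QualifiedName,
         _max_age: f64| Ok(Some(DataValue::new_now(42u32))),
    );

    // The setter mirrors `AttributeSetter::set`; here it silently accepts any written value.
    let setter = AttrFnSetter::new_boxed(
        |_node_id: &NodeId,
         _attribute_id: AttributeId,
         _index_range: NumericRange,
         _value: DataValue| Ok(()),
    );

    // Both handles can be attached to a Variable (see `set_value_getter` and the
    // corresponding setter hook in variable.rs later in this diff).
    let _ = (getter, setter);
}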
@@ -118,13 +118,36 @@ pub trait Node: NodeBase { /// /// If there is a getter registered with the node, then the getter will interpret /// `max_age` how it sees fit. - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option; + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option; /// Finds the attribute and value. - fn get_attribute(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName) -> Option { - self.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, 0f64) + fn get_attribute( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + ) -> Option { + self.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + 0f64, + ) } /// Sets the attribute with the new value - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode>; + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode>; } diff --git a/server/src/address_space/object.rs b/server/src/address_space/object.rs index 944f1b965..d8cc9cb01 100644 --- a/server/src/address_space/object.rs +++ b/server/src/address_space/object.rs @@ -6,10 +6,7 @@ use opcua_types::service_types::ObjectAttributes; -use crate::address_space::{ - base::Base, - EventNotifier, node::Node, node::NodeBase, -}; +use crate::address_space::{base::Base, node::Node, node::NodeBase, EventNotifier}; node_builder_impl!(ObjectBuilder, Object); node_builder_impl_component_of!(ObjectBuilder); @@ -25,12 +22,26 @@ impl ObjectBuilder { self } - pub fn has_type_definition(self, type_id: T) -> Self where T: Into { - self.reference(type_id, ReferenceTypeId::HasTypeDefinition, ReferenceDirection::Forward) + pub fn has_type_definition(self, type_id: T) -> Self + where + T: Into, + { + self.reference( + type_id, + ReferenceTypeId::HasTypeDefinition, + ReferenceDirection::Forward, + ) } - pub fn has_event_source(self, source_id: T) -> Self where T: Into { - self.reference(source_id, ReferenceTypeId::HasEventSource, ReferenceDirection::Forward) + pub fn has_event_source(self, source_id: T) -> Self + where + T: Into, + { + self.reference( + source_id, + ReferenceTypeId::HasEventSource, + ReferenceDirection::Forward, + ) } } @@ -53,14 +64,31 @@ impl Default for Object { node_base_impl!(Object); impl Node for Object { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::EventNotifier => Some(self.event_notifier().bits().into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, 
value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::EventNotifier => { if let Variant::Byte(v) = value { @@ -70,15 +98,21 @@ impl Node for Object { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => self.base.set_attribute(attribute_id, value), } } } impl Object { - pub fn new(node_id: &NodeId, browse_name: R, display_name: S, event_notifier: EventNotifier) -> Object - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + event_notifier: EventNotifier, + ) -> Object + where + R: Into, + S: Into, { Object { base: Base::new(NodeClass::Object, node_id, browse_name, display_name), @@ -86,15 +120,25 @@ impl Object { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: ObjectAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: ObjectAttributes, + ) -> Result + where + S: Into, { let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::EVENT_NOTIFIER; let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(mandatory_attributes) { let event_notifier = EventNotifier::from_bits_truncate(attributes.event_notifier); - let mut node = Self::new(node_id, browse_name, attributes.display_name, event_notifier); + let mut node = Self::new( + node_id, + browse_name, + attributes.display_name, + event_notifier, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } @@ -123,4 +167,3 @@ impl Object { self.event_notifier = event_notifier; } } - diff --git a/server/src/address_space/object_type.rs b/server/src/address_space/object_type.rs index 1e845c0aa..3ca45ba61 100644 --- a/server/src/address_space/object_type.rs +++ b/server/src/address_space/object_type.rs @@ -41,14 +41,31 @@ impl Default for ObjectType { node_base_impl!(ObjectType); impl Node for ObjectType { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::IsAbstract => Some(self.is_abstract().into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::IsAbstract => { if let Variant::Boolean(v) = value { @@ -58,16 +75,21 @@ impl Node for ObjectType { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => self.base.set_attribute(attribute_id, value), } } } - impl ObjectType { - pub fn new(node_id: &NodeId, browse_name: R, display_name: S, is_abstract: bool) -> ObjectType - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + is_abstract: bool, + ) -> ObjectType 
+ where + R: Into, + S: Into, { ObjectType { base: Base::new(NodeClass::ObjectType, node_id, browse_name, display_name), @@ -75,13 +97,23 @@ impl ObjectType { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: ObjectTypeAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: ObjectTypeAttributes, + ) -> Result + where + S: Into, { let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::IS_ABSTRACT; let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(mandatory_attributes) { - let mut node = Self::new(node_id, browse_name, attributes.display_name, attributes.is_abstract); + let mut node = Self::new( + node_id, + browse_name, + attributes.display_name, + attributes.is_abstract, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } @@ -109,4 +141,4 @@ impl ObjectType { pub fn set_is_abstract(&mut self, is_abstract: bool) { self.is_abstract = is_abstract; } -} \ No newline at end of file +} diff --git a/server/src/address_space/reference_type.rs b/server/src/address_space/reference_type.rs index 3fe75ccb9..1fb27e8ce 100644 --- a/server/src/address_space/reference_type.rs +++ b/server/src/address_space/reference_type.rs @@ -34,16 +34,33 @@ impl Default for ReferenceType { node_base_impl!(ReferenceType); impl Node for ReferenceType { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::Symmetric => Some(self.symmetric().into()), AttributeId::IsAbstract => Some(self.is_abstract().into()), AttributeId::InverseName => self.inverse_name().map(|v| v.into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::Symmetric => { if let Variant::Boolean(v) = value { @@ -69,15 +86,23 @@ impl Node for ReferenceType { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => self.base.set_attribute(attribute_id, value), } } } impl ReferenceType { - pub fn new(node_id: &NodeId, browse_name: R, display_name: S, inverse_name: Option, symmetric: bool, is_abstract: bool) -> ReferenceType - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + inverse_name: Option, + symmetric: bool, + is_abstract: bool, + ) -> ReferenceType + where + R: Into, + S: Into, { ReferenceType { base: Base::new(NodeClass::ReferenceType, node_id, browse_name, display_name), @@ -87,13 +112,26 @@ impl ReferenceType { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: ReferenceTypeAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: ReferenceTypeAttributes, + ) -> Result + where + S: Into, 
{ - let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::IS_ABSTRACT | AttributesMask::SYMMETRIC; + let mandatory_attributes = + AttributesMask::DISPLAY_NAME | AttributesMask::IS_ABSTRACT | AttributesMask::SYMMETRIC; let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(mandatory_attributes) { - let mut node = Self::new(node_id, browse_name, attributes.display_name, None, false, false); + let mut node = Self::new( + node_id, + browse_name, + attributes.display_name, + None, + false, + false, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } diff --git a/server/src/address_space/references.rs b/server/src/address_space/references.rs index aa1afc68c..5f81db4e5 100644 --- a/server/src/address_space/references.rs +++ b/server/src/address_space/references.rs @@ -15,7 +15,10 @@ pub struct Reference { } impl Reference { - pub fn new(reference_type: T, target_node: NodeId) -> Reference where T: Into { + pub fn new(reference_type: T, target_node: NodeId) -> Reference + where + T: Into, + { Reference { reference_type: reference_type.into(), target_node, @@ -50,8 +53,12 @@ impl Default for References { impl References { /// Inserts a single reference into the map. - pub fn insert(&mut self, source_node: &NodeId, references: &[(&NodeId, &T, ReferenceDirection)]) - where T: Into + Clone + pub fn insert( + &mut self, + source_node: &NodeId, + references: &[(&NodeId, &T, ReferenceDirection)], + ) where + T: Into + Clone, { references.iter().for_each(|r| { // An inverse reference will flip the nodes around @@ -73,34 +80,58 @@ impl References { } else if self.references_map.contains_key(node_id) { debug!("Node {} is a key in references_from_map", node_id); true - } else if self.references_map.iter().find(|(k, v)| { - if let Some(r) = v.iter().find(|r| r.target_node == *node_id) { - debug!("Node {} is a value in references_from_map[{}, reference = {:?}", node_id, k, r); - true - } else { - false - } - }).is_some() { + } else if self + .references_map + .iter() + .find(|(k, v)| { + if let Some(r) = v.iter().find(|r| r.target_node == *node_id) { + debug!( + "Node {} is a value in references_from_map[{}, reference = {:?}", + node_id, k, r + ); + true + } else { + false + } + }) + .is_some() + { true - } else if self.referenced_by_map.iter().find(|(k, v)| { - if v.contains(node_id) { - debug!("Node {} is a value in referenced_by_map, key {}", node_id, k); - true - } else { - false - } - }).is_some() { + } else if self + .referenced_by_map + .iter() + .find(|(k, v)| { + if v.contains(node_id) { + debug!( + "Node {} is a value in referenced_by_map, key {}", + node_id, k + ); + true + } else { + false + } + }) + .is_some() + { true } else { false } } - pub fn insert_reference(&mut self, source_node: &NodeId, target_node: &NodeId, reference_type: &T) - where T: Into + Clone + pub fn insert_reference( + &mut self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: &T, + ) where + T: Into + Clone, { if source_node == target_node { - panic!("Node id from == node id to {}, self reference is not allowed", source_node); + panic!( + "Node id from == node id to {}, self reference is not allowed", + source_node + ); } let reference_type: NodeId = reference_type.clone().into(); @@ -125,40 +156,46 @@ impl References { } else { let mut lookup_set = HashSet::new(); lookup_set.insert(source_node.clone()); - self.referenced_by_map.insert(target_node.clone(), lookup_set); + self.referenced_by_map + 
.insert(target_node.clone(), lookup_set); } } /// Inserts references into the map. pub fn insert_references(&mut self, references: &[(&NodeId, &NodeId, &T)]) - where T: Into + Clone + where + T: Into + Clone, { references.iter().for_each(|r| { self.insert_reference(r.0, r.1, r.2); }); } - fn remove_node_from_referenced_nodes(&mut self, nodes_to_check: HashSet, node_to_remove: &NodeId) { + fn remove_node_from_referenced_nodes( + &mut self, + nodes_to_check: HashSet, + node_to_remove: &NodeId, + ) { nodes_to_check.into_iter().for_each(|node_to_check| { // Removes any references that refer from the node to check back to the node to remove - let remove_entry = if let Some(ref mut references) = self.references_map.get_mut(&node_to_check) { - references.retain(|r| { - r.target_node != *node_to_remove - }); - references.is_empty() - } else { - false - }; + let remove_entry = + if let Some(ref mut references) = self.references_map.get_mut(&node_to_check) { + references.retain(|r| r.target_node != *node_to_remove); + references.is_empty() + } else { + false + }; if remove_entry { self.references_map.remove(&node_to_check); } // Remove lookup that refer from the node to check back to the node to remove - let remove_lookup_map = if let Some(ref mut lookup_map) = self.referenced_by_map.get_mut(&node_to_check) { - lookup_map.remove(node_to_remove); - lookup_map.is_empty() - } else { - false - }; + let remove_lookup_map = + if let Some(ref mut lookup_map) = self.referenced_by_map.get_mut(&node_to_check) { + lookup_map.remove(node_to_remove); + lookup_map.is_empty() + } else { + false + }; if remove_lookup_map { self.referenced_by_map.remove(&node_to_check); } @@ -167,7 +204,15 @@ impl References { /// Deletes a matching references between one node and the target node of the specified /// reference type. The function returns true if the reference was found and deleted. - pub fn delete_reference(&mut self, source_node: &NodeId, target_node: &NodeId, reference_type: T) -> bool where T: Into { + pub fn delete_reference( + &mut self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: T, + ) -> bool + where + T: Into, + { let reference_type = reference_type.into(); let mut deleted = false; @@ -175,7 +220,10 @@ impl References { // Remove the source node reference if let Some(references) = self.references_map.get_mut(source_node) { // Make a set of all the nodes that this node references - let other_nodes_before = references.iter().map(|r| r.target_node.clone()).collect::>(); + let other_nodes_before = references + .iter() + .map(|r| r.target_node.clone()) + .collect::>(); // Delete a reference references.retain(|r| { if r.reference_type == reference_type && r.target_node == *target_node { @@ -190,11 +238,17 @@ impl References { } // Make a set of all nodes that this node references (after removal) - let other_nodes_after = references.iter().map(|r| r.target_node.clone()).collect::>(); + let other_nodes_after = references + .iter() + .map(|r| r.target_node.clone()) + .collect::>(); // If nodes are no longer referenced, then the ones that were removed must also have their // references changed. 
- let difference = other_nodes_before.difference(&other_nodes_after).cloned().collect::>(); + let difference = other_nodes_before + .difference(&other_nodes_after) + .cloned() + .collect::>(); if !difference.is_empty() { self.remove_node_from_referenced_nodes(difference, source_node); } @@ -210,7 +264,10 @@ impl References { pub fn delete_node_references(&mut self, source_node: &NodeId) -> bool { let deleted_references = if let Some(references) = self.references_map.remove(source_node) { // Deleted every reference from the node, and clean up the reverse lookup map - let nodes_referenced = references.iter().map(|r| r.target_node.clone()).collect::>(); + let nodes_referenced = references + .iter() + .map(|r| r.target_node.clone()) + .collect::>(); self.remove_node_from_referenced_nodes(nodes_referenced, source_node); true } else { @@ -228,7 +285,15 @@ impl References { } /// Test if a reference relationship exists between one node and another node - pub fn has_reference(&self, source_node: &NodeId, target_node: &NodeId, reference_type: T) -> bool where T: Into { + pub fn has_reference( + &self, + source_node: &NodeId, + target_node: &NodeId, + reference_type: T, + ) -> bool + where + T: Into, + { if let Some(references) = self.references_map.get(&source_node) { let reference = Reference::new(reference_type.into(), target_node.clone()); references.contains(&reference) @@ -238,7 +303,14 @@ impl References { } /// Finds forward references from the node - pub fn find_references(&self, source_node: &NodeId, reference_filter: Option<(T, bool)>) -> Option> where T: Into + Clone { + pub fn find_references( + &self, + source_node: &NodeId, + reference_filter: Option<(T, bool)>, + ) -> Option> + where + T: Into + Clone, + { if let Some(ref node_references) = self.references_map.get(source_node) { let result = self.filter_references_by_type(node_references, &reference_filter); if result.is_empty() { @@ -254,22 +326,29 @@ impl References { /// Returns inverse references for the target node, i.e if there are references where /// `Reference.target_node` matches the supplied target node then return references /// where `Reference.target_node` is the source node. 
- pub fn find_inverse_references(&self, target_node: &NodeId, reference_filter: Option<(T, bool)>) -> Option> where T: Into + Clone { + pub fn find_inverse_references( + &self, + target_node: &NodeId, + reference_filter: Option<(T, bool)>, + ) -> Option> + where + T: Into + Clone, + { if let Some(lookup_map) = self.referenced_by_map.get(target_node) { // Iterate all nodes that reference this node, collecting their references let mut result = Vec::with_capacity(16); lookup_map.iter().for_each(|source_node| { if let Some(references) = self.references_map.get(source_node) { - let references = references.iter() + let references = references + .iter() .filter(|r| r.target_node == *target_node) - .map(|r| { - Reference { - reference_type: r.reference_type.clone(), - target_node: source_node.clone(), - } + .map(|r| Reference { + reference_type: r.reference_type.clone(), + target_node: source_node.clone(), }) .collect::>(); - let mut references = self.filter_references_by_type(&references, &reference_filter); + let mut references = + self.filter_references_by_type(&references, &reference_filter); if !references.is_empty() { result.append(&mut references); } @@ -285,13 +364,27 @@ impl References { } } - fn filter_references_by_type(&self, references: &Vec, reference_filter: &Option<(T, bool)>) -> Vec where T: Into + Clone { + fn filter_references_by_type( + &self, + references: &Vec, + reference_filter: &Option<(T, bool)>, + ) -> Vec + where + T: Into + Clone, + { match reference_filter { None => references.clone(), Some((reference_type_id, include_subtypes)) => { let reference_type_id = reference_type_id.clone().into(); - references.iter() - .filter(|r| self.reference_type_matches(&reference_type_id, &r.reference_type, *include_subtypes)) + references + .iter() + .filter(|r| { + self.reference_type_matches( + &reference_type_id, + &r.reference_type, + *include_subtypes, + ) + }) .cloned() .collect::>() } @@ -301,8 +394,14 @@ impl References { /// Find references optionally to and/or from the specified node id. The browse direction /// indicates the desired direction, or both. The reference filter indicates if only references /// of a certain type (including sub types) should be fetched. 
- pub fn find_references_by_direction(&self, node: &NodeId, browse_direction: BrowseDirection, reference_filter: Option<(T, bool)>) -> (Vec, usize) - where T: Into + Clone + pub fn find_references_by_direction( + &self, + node: &NodeId, + browse_direction: BrowseDirection, + reference_filter: Option<(T, bool)>, + ) -> (Vec, usize) + where + T: Into + Clone, { let mut references = Vec::new(); let inverse_ref_idx: usize; @@ -315,17 +414,26 @@ impl References { } BrowseDirection::Inverse => { inverse_ref_idx = 0; - if let Some(mut inverse_references) = self.find_inverse_references(node, reference_filter) { + if let Some(mut inverse_references) = + self.find_inverse_references(node, reference_filter) + { references.append(&mut inverse_references); } } BrowseDirection::Both => { - let reference_filter: Option<(NodeId, bool)> = reference_filter.map(|(reference_type, include_subtypes)| (reference_type.into(), include_subtypes)); - if let Some(mut forward_references) = self.find_references(node, reference_filter.clone()) { + let reference_filter: Option<(NodeId, bool)> = + reference_filter.map(|(reference_type, include_subtypes)| { + (reference_type.into(), include_subtypes) + }); + if let Some(mut forward_references) = + self.find_references(node, reference_filter.clone()) + { references.append(&mut forward_references); } inverse_ref_idx = references.len(); - if let Some(mut inverse_references) = self.find_inverse_references(node, reference_filter) { + if let Some(mut inverse_references) = + self.find_inverse_references(node, reference_filter) + { references.append(&mut inverse_references); } } @@ -340,7 +448,12 @@ impl References { /// Test if a reference type matches another reference type which is potentially a subtype. /// If `include_subtypes` is set to true, the function will test if the subttype /// for a match. 
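// Illustrative sketch (not part of the patch): exercising the reference map reformatted in
// this hunk, written as it might appear in an in-crate unit test (it assumes `References`
// itself is public within the crate and that the `opcua_types` glob brings in NodeId,
// ReferenceTypeId and BrowseDirection). Node ids are hypothetical.
fn _references_sketch() {
    use crate::address_space::references::References;
    use opcua_types::*;

    let mut references = References::default();
    let folder = NodeId::new(2, "folder");
    let item = NodeId::new(2, "item");

    // Forward reference folder --Organizes--> item; the reverse-lookup map is kept in sync.
    references.insert_reference(&folder, &item, &ReferenceTypeId::Organizes);
    assert!(references.has_reference(&folder, &item, ReferenceTypeId::Organizes));

    // Browse both directions, filtering on Organizes (subtypes included). The second element
    // of the returned tuple is the index at which the inverse references begin.
    let (refs, inverse_idx) = references.find_references_by_direction(
        &item,
        BrowseDirection::Both,
        Some((ReferenceTypeId::Organizes, true)),
    );
    assert_eq!(refs.len(), 1); // only the inverse reference back to `folder`
    assert_eq!(inverse_idx, 0); // `item` has no forward references of its own

    // Deleting the reference also cleans up the reverse-lookup entry.
    assert!(references.delete_reference(&folder, &item, ReferenceTypeId::Organizes));
}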
- pub fn reference_type_matches(&self, ref_type: &NodeId, ref_subtype: &NodeId, include_subtypes: bool) -> bool { + pub fn reference_type_matches( + &self, + ref_type: &NodeId, + ref_subtype: &NodeId, + include_subtypes: bool, + ) -> bool { if ref_type == ref_subtype { true } else if include_subtypes { @@ -357,7 +470,8 @@ impl References { found = true; break; } else if let Some(references) = self.references_map.get(¤t) { - let mut subtypes = references.iter() + let mut subtypes = references + .iter() .filter(|r| r.reference_type == has_subtype) .map(|r| r.target_node.clone()) .collect::>(); @@ -377,9 +491,10 @@ impl References { pub fn get_type_id(&self, node: &NodeId) -> Option { if let Some(references) = self.references_map.get(&node) { let has_type_definition_id = ReferenceTypeId::HasTypeDefinition.into(); - if let Some(reference) = references.iter().find(|r| { - r.reference_type == has_type_definition_id - }) { + if let Some(reference) = references + .iter() + .find(|r| r.reference_type == has_type_definition_id) + { Some(reference.target_node.clone()) } else { None @@ -388,4 +503,4 @@ impl References { None } } -} \ No newline at end of file +} diff --git a/server/src/address_space/relative_path.rs b/server/src/address_space/relative_path.rs index d4b442cf9..a9e7e4054 100644 --- a/server/src/address_space/relative_path.rs +++ b/server/src/address_space/relative_path.rs @@ -6,20 +6,22 @@ use std::collections::HashSet; use opcua_types::{ node_id::NodeId, - QualifiedName, service_types::{RelativePath, RelativePathElement}, status_code::StatusCode, + QualifiedName, }; -use crate::{ - address_space::{AddressSpace, node::NodeType} -}; +use crate::address_space::{node::NodeType, AddressSpace}; /// Given a browse path consisting of browse names, walk nodes from the root until we find a single node (or not). /// This function is a simplified use case for event filters and such like where a browse path /// is defined as an array and doesn't need to be parsed out of a relative path. All nodes in the /// path must be objects or variables. -pub(crate) fn find_node_from_browse_path<'a>(address_space: &'a AddressSpace, parent_node_id: &NodeId, browse_path: &[QualifiedName]) -> Result<&'a NodeType, StatusCode> { +pub(crate) fn find_node_from_browse_path<'a>( + address_space: &'a AddressSpace, + parent_node_id: &NodeId, + browse_path: &[QualifiedName], +) -> Result<&'a NodeType, StatusCode> { if browse_path.is_empty() { Err(StatusCode::BadNotFound) } else { @@ -47,11 +49,17 @@ pub(crate) fn find_node_from_browse_path<'a>(address_space: &'a AddressSpace, pa if let Some(found_node_id) = found_node_id { parent_node_id = found_node_id.clone(); } else { - debug!("Cannot find node under {} with browse_path of {:?}/1", parent_node_id, browse_path); + debug!( + "Cannot find node under {} with browse_path of {:?}/1", + parent_node_id, browse_path + ); return Err(StatusCode::BadNotFound); } } else { - debug!("Cannot find node under {} with browse_path of {:?}/2", parent_node_id, browse_path); + debug!( + "Cannot find node under {} with browse_path of {:?}/2", + parent_node_id, browse_path + ); return Err(StatusCode::BadNotFound); } } @@ -60,7 +68,11 @@ pub(crate) fn find_node_from_browse_path<'a>(address_space: &'a AddressSpace, pa } /// Given a `RelativePath`, find all the nodes that match against it. 
-pub(crate) fn find_nodes_relative_path(address_space: &AddressSpace, node_id: &NodeId, relative_path: &RelativePath) -> Result, StatusCode> { +pub(crate) fn find_nodes_relative_path( + address_space: &AddressSpace, + node_id: &NodeId, + relative_path: &RelativePath, +) -> Result, StatusCode> { match address_space.find_node(node_id) { None => { trace!("find_nodes_relative_path cannot find node {}", node_id); @@ -88,7 +100,9 @@ pub(crate) fn find_nodes_relative_path(address_space: &AddressSpace, node_id: &N matching_nodes.drain(..).for_each(|node_id| { trace!("Following relative path on node {}", node_id); // Iterate current set of nodes and put the results into next - if let Some(mut result) = follow_relative_path(address_space, &node_id, element) { + if let Some(mut result) = + follow_relative_path(address_space, &node_id, element) + { trace!(" Found matching nodes {:#?}", result); next_matching_nodes.append(&mut result); } else { @@ -113,7 +127,11 @@ pub(crate) fn find_nodes_relative_path(address_space: &AddressSpace, node_id: &N } } -fn follow_relative_path(address_space: &AddressSpace, node_id: &NodeId, relative_path: &RelativePathElement) -> Option> { +fn follow_relative_path( + address_space: &AddressSpace, + node_id: &NodeId, + relative_path: &RelativePathElement, +) -> Option> { let reference_filter = { if let Ok(reference_type_id) = relative_path.reference_type_id.as_reference_type_id() { Some((reference_type_id, relative_path.include_subtypes)) diff --git a/server/src/address_space/variable.rs b/server/src/address_space/variable.rs index 1f350ef88..8ad3f16f3 100644 --- a/server/src/address_space/variable.rs +++ b/server/src/address_space/variable.rs @@ -11,9 +11,9 @@ use opcua_types::service_types::VariableAttributes; use crate::{ address_space::{ - AccessLevel, base::Base, + base::Base, node::{Node, NodeBase}, - UserAccessLevel, + AccessLevel, UserAccessLevel, }, callbacks::{AttributeGetter, AttributeSetter}, }; @@ -26,13 +26,19 @@ node_builder_impl_property_of!(VariableBuilder); impl VariableBuilder { /// Sets the value of the variable. - pub fn value(mut self, value: V) -> Self where V: Into { + pub fn value(mut self, value: V) -> Self + where + V: Into, + { let _ = self.node.set_value(NumericRange::None, value); self } /// Sets the data type of the variable. 
- pub fn data_type(mut self, data_type: T) -> Self where T: Into { + pub fn data_type(mut self, data_type: T) -> Self + where + T: Into, + { self.node.set_data_type(data_type); self } @@ -69,35 +75,42 @@ impl VariableBuilder { /// Makes the variable writable (by default it isn't) pub fn writable(mut self) -> Self { - self.node.set_user_access_level(self.node.user_access_level() | UserAccessLevel::CURRENT_WRITE); - self.node.set_access_level(self.node.access_level() | AccessLevel::CURRENT_WRITE); + self.node + .set_user_access_level(self.node.user_access_level() | UserAccessLevel::CURRENT_WRITE); + self.node + .set_access_level(self.node.access_level() | AccessLevel::CURRENT_WRITE); self } /// Makes the variable history-readable pub fn history_readable(mut self) -> Self { - self.node.set_user_access_level(self.node.user_access_level() | UserAccessLevel::HISTORY_READ); - self.node.set_access_level(self.node.access_level() | AccessLevel::HISTORY_READ); + self.node + .set_user_access_level(self.node.user_access_level() | UserAccessLevel::HISTORY_READ); + self.node + .set_access_level(self.node.access_level() | AccessLevel::HISTORY_READ); self } /// Makes the variable history-updateable pub fn history_updatable(mut self) -> Self { - self.node.set_user_access_level(self.node.user_access_level() | UserAccessLevel::HISTORY_WRITE); - self.node.set_access_level(self.node.access_level() | AccessLevel::HISTORY_WRITE); + self.node + .set_user_access_level(self.node.user_access_level() | UserAccessLevel::HISTORY_WRITE); + self.node + .set_access_level(self.node.access_level() | AccessLevel::HISTORY_WRITE); self } /// Sets the minimum sampling interval for the variable. pub fn minimum_sampling_interval(mut self, minimum_sampling_interval: f64) -> Self { - self.node.set_minimum_sampling_interval(minimum_sampling_interval); + self.node + .set_minimum_sampling_interval(minimum_sampling_interval); self } /// Sets a value getter function for the variable. Whenever the value of a variable /// needs to be fetched (e.g. from a monitored item subscription), this trait will be called /// to get the value. - pub fn value_getter(mut self, getter: Arc>) -> Self { + pub fn value_getter(mut self, getter: Arc>) -> Self { self.node.set_value_getter(getter); self } @@ -111,13 +124,27 @@ impl VariableBuilder { } /// Add a reference to the variable indicating it has a type of another node. - pub fn has_type_definition(self, type_id: T) -> Self where T: Into { - self.reference(type_id, ReferenceTypeId::HasTypeDefinition, ReferenceDirection::Forward) + pub fn has_type_definition(self, type_id: T) -> Self + where + T: Into, + { + self.reference( + type_id, + ReferenceTypeId::HasTypeDefinition, + ReferenceDirection::Forward, + ) } /// Add a reference to the variable indicating it has a modelling rule of another node. 
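// Illustrative sketch (not part of the patch): the builder methods reformatted above, used
// to add a writable variable to an address space. Assumes `use opcua_server::prelude::*;`;
// the namespace index, node ids and parent folder are hypothetical, and the folder node is
// assumed to already exist in the address space.
fn _variable_builder_sketch(address_space: &mut AddressSpace) {
    let folder = NodeId::new(2, "metrics");
    VariableBuilder::new(&NodeId::new(2, "ticks"), "ticks", "ticks")
        .data_type(DataTypeId::UInt64)
        .value(0u64)
        .writable()
        .organized_by(folder)
        // A computed value could be supplied instead with `.value_getter(..)`, using the
        // AttrFnGetter sketch shown earlier in this diff.
        .insert(address_space);
}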
- pub fn has_modelling_rule(self, type_id: T) -> Self where T: Into { - self.reference(type_id, ReferenceTypeId::HasModellingRule, ReferenceDirection::Forward) + pub fn has_modelling_rule(self, type_id: T) -> Self + where + T: Into, + { + self.reference( + type_id, + ReferenceTypeId::HasModellingRule, + ReferenceDirection::Forward, + ) } } @@ -163,61 +190,94 @@ impl Default for Variable { node_base_impl!(Variable); impl Node for Variable { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { /* TODO for Variables derived from the Structure data type, the AttributeId::Value should check data encoding and return the value encoded according "Default Binary", "Default XML" or "Default JSON" (OPC UA 1.04). */ match attribute_id { // Mandatory attributes - AttributeId::Value => Some(self.value(timestamps_to_return, index_range, data_encoding, max_age)), + AttributeId::Value => { + Some(self.value(timestamps_to_return, index_range, data_encoding, max_age)) + } AttributeId::DataType => Some(self.data_type().into()), AttributeId::Historizing => Some(self.historizing().into()), AttributeId::ValueRank => Some(self.value_rank().into()), AttributeId::AccessLevel => Some(self.access_level().bits().into()), AttributeId::UserAccessLevel => Some(self.user_access_level().bits().into()), // Optional attributes - AttributeId::ArrayDimensions => self.array_dimensions().map(|v| Variant::from(v).into()), - AttributeId::MinimumSamplingInterval => self.minimum_sampling_interval().map(|v| v.into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + AttributeId::ArrayDimensions => { + self.array_dimensions().map(|v| Variant::from(v).into()) + } + AttributeId::MinimumSamplingInterval => { + self.minimum_sampling_interval().map(|v| v.into()) + } + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { - AttributeId::DataType => if let Variant::NodeId(v) = value { - self.set_data_type(*v); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - }, - AttributeId::Historizing => if let Variant::Boolean(v) = value { - self.set_historizing(v); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - }, - AttributeId::ValueRank => if let Variant::Int32(v) = value { - self.set_value_rank(v); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - }, + AttributeId::DataType => { + if let Variant::NodeId(v) = value { + self.set_data_type(*v); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + } + AttributeId::Historizing => { + if let Variant::Boolean(v) = value { + self.set_historizing(v); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + } + AttributeId::ValueRank => { + if let Variant::Int32(v) = value { + self.set_value_rank(v); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + } AttributeId::Value => { // Call set_value directly self.set_value(NumericRange::None, value) } - 
AttributeId::AccessLevel => if let Variant::Byte(v) = value { - self.set_access_level(AccessLevel::from_bits_truncate(v)); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - }, - AttributeId::UserAccessLevel => if let Variant::Byte(v) = value { - self.set_user_access_level(UserAccessLevel::from_bits_truncate(v)); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - }, + AttributeId::AccessLevel => { + if let Variant::Byte(v) = value { + self.set_access_level(AccessLevel::from_bits_truncate(v)); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + } + AttributeId::UserAccessLevel => { + if let Variant::Byte(v) = value { + self.set_user_access_level(UserAccessLevel::from_bits_truncate(v)); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + } AttributeId::ArrayDimensions => { let array_dimensions = >::try_from(&value); if let Ok(array_dimensions) = array_dimensions { @@ -227,13 +287,15 @@ impl Node for Variable { Err(StatusCode::BadTypeMismatch) } } - AttributeId::MinimumSamplingInterval => if let Variant::Double(v) = value { - self.set_minimum_sampling_interval(v); - Ok(()) - } else { - Err(StatusCode::BadTypeMismatch) - }, - _ => self.base.set_attribute(attribute_id, value) + AttributeId::MinimumSamplingInterval => { + if let Variant::Double(v) = value { + self.set_minimum_sampling_interval(v); + Ok(()) + } else { + Err(StatusCode::BadTypeMismatch) + } + } + _ => self.base.set_attribute(attribute_id, value), } } } @@ -245,31 +307,60 @@ impl Variable { /// inferred types for data type or value rank are wrong, they may be explicitly set, or /// call `new_data_value()` instead. pub fn new(node_id: &NodeId, browse_name: R, display_name: S, value: V) -> Variable - where R: Into, - S: Into, - V: Into + where + R: Into, + S: Into, + V: Into, { let value = value.into(); let data_type = value.scalar_data_type().or_else(|| value.array_data_type()); if let Some(data_type) = data_type { - Variable::new_data_value(node_id, browse_name, display_name, data_type, None, None, value) + Variable::new_data_value( + node_id, + browse_name, + display_name, + data_type, + None, + None, + value, + ) } else { panic!("Data type cannot be inferred from the value, use another constructor such as new_data_value") } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: VariableAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: VariableAttributes, + ) -> Result + where + S: Into, { - let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::ACCESS_LEVEL | AttributesMask::USER_ACCESS_LEVEL | - AttributesMask::DATA_TYPE | AttributesMask::HISTORIZING | AttributesMask::VALUE | AttributesMask::VALUE_RANK; + let mandatory_attributes = AttributesMask::DISPLAY_NAME + | AttributesMask::ACCESS_LEVEL + | AttributesMask::USER_ACCESS_LEVEL + | AttributesMask::DATA_TYPE + | AttributesMask::HISTORIZING + | AttributesMask::VALUE + | AttributesMask::VALUE_RANK; let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(mandatory_attributes) { - let mut node = Self::new_data_value(node_id, browse_name, attributes.display_name, attributes.data_type, None, None, attributes.value); + let mut node = Self::new_data_value( + node_id, + browse_name, + attributes.display_name, + attributes.data_type, + None, + None, + attributes.value, + ); node.set_value_rank(attributes.value_rank); node.set_historizing(attributes.historizing); 
node.set_access_level(AccessLevel::from_bits_truncate(attributes.access_level)); - node.set_user_access_level(UserAccessLevel::from_bits_truncate(attributes.user_access_level)); + node.set_user_access_level(UserAccessLevel::from_bits_truncate( + attributes.user_access_level, + )); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); @@ -294,11 +385,20 @@ impl Variable { } /// Constructs a new variable with the specified id, name, type and value - pub fn new_data_value(node_id: &NodeId, browse_name: R, display_name: S, data_type: N, value_rank: Option, array_dimensions: Option, value: V) -> Variable - where R: Into, - S: Into, - N: Into, - V: Into + pub fn new_data_value( + node_id: &NodeId, + browse_name: R, + display_name: S, + data_type: N, + value_rank: Option, + array_dimensions: Option, + value: V, + ) -> Variable + where + R: Into, + S: Into, + N: Into, + V: Into, { let value = value.into(); let array_dimensions = if let Some(array_dimensions) = array_dimensions { @@ -311,10 +411,16 @@ impl Variable { } else { // Multidimensional arrays encode/decode dimensions with Int32 in Part 6, but arrayDimensions in Part 3 // wants them as u32. Go figure... So convert Int32 to u32 - Some(array.dimensions.iter().map(|v| *v as u32).collect::>()) + Some( + array + .dimensions + .iter() + .map(|v| *v as u32) + .collect::>(), + ) } } - _ => None + _ => None, } }; @@ -349,12 +455,28 @@ impl Variable { !self.data_type.is_null() && self.base.is_valid() } - pub fn value(&self, timestamps_to_return: TimestampsToReturn, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> DataValue { + pub fn value( + &self, + timestamps_to_return: TimestampsToReturn, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> DataValue { use std::i32; if let Some(ref value_getter) = self.value_getter { let mut value_getter = value_getter.lock().unwrap(); - value_getter.get(&self.node_id(), timestamps_to_return, AttributeId::Value, index_range, data_encoding, max_age).unwrap().unwrap() + value_getter + .get( + &self.node_id(), + timestamps_to_return, + AttributeId::Value, + index_range, + data_encoding, + max_age, + ) + .unwrap() + .unwrap() } else { let data_value = &self.value; let mut result = DataValue { @@ -387,7 +509,10 @@ impl Variable { } /// Sets the variable's `Variant` value. The timestamps for the change are updated to now. 
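// Illustrative sketch (not part of the patch): updating a Variable through the setters
// reformatted in this file. Assumes `use opcua_server::prelude::*;`; the node id is hypothetical.
fn _set_value_sketch() {
    // The UInt32 data type is inferred from the initial value.
    let mut v = Variable::new(&NodeId::new(2, "counter"), "counter", "counter", 0u32);

    // Replace the whole value; the timestamps are set to "now" internally.
    let _ = v.set_value(NumericRange::None, 1u32);

    // Or supply the status code and timestamps explicitly.
    let now = DateTime::now();
    let _ = v.set_value_direct(2u32, StatusCode::Good, &now, &now);
}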
- pub fn set_value(&mut self, index_range: NumericRange, value: V) -> Result<(), StatusCode> where V: Into { + pub fn set_value(&mut self, index_range: NumericRange, value: V) -> Result<(), StatusCode> + where + V: Into, + { let mut value = value.into(); // A special case is required here for when the variable is a single dimension @@ -407,7 +532,12 @@ impl Variable { // The value is set to the value getter if let Some(ref value_setter) = self.value_setter { let mut value_setter = value_setter.lock().unwrap(); - value_setter.set(&self.node_id(), AttributeId::Value, index_range, value.into()) + value_setter.set( + &self.node_id(), + AttributeId::Value, + index_range, + value.into(), + ) } else { let now = DateTime::now(); if index_range.has_range() { @@ -419,7 +549,14 @@ impl Variable { } // Set a range value - pub fn set_value_range(&mut self, value: Variant, index_range: NumericRange, status_code: StatusCode, server_timestamp: &DateTime, source_timestamp: &DateTime) -> Result<(), StatusCode> { + pub fn set_value_range( + &mut self, + value: Variant, + index_range: NumericRange, + status_code: StatusCode, + server_timestamp: &DateTime, + source_timestamp: &DateTime, + ) -> Result<(), StatusCode> { match self.value.value { Some(ref mut full_value) => { // Overwrite a partial section of the value @@ -429,12 +566,21 @@ impl Variable { self.value.source_timestamp = Some(source_timestamp.clone()); Ok(()) } - None => Err(StatusCode::BadIndexRangeInvalid) + None => Err(StatusCode::BadIndexRangeInvalid), } } /// Sets the variable's `DataValue` - pub fn set_value_direct(&mut self, value: V, status_code: StatusCode, server_timestamp: &DateTime, source_timestamp: &DateTime) -> Result<(), StatusCode> where V: Into { + pub fn set_value_direct( + &mut self, + value: V, + status_code: StatusCode, + server_timestamp: &DateTime, + source_timestamp: &DateTime, + ) -> Result<(), StatusCode> + where + V: Into, + { self.value.value = Some(value.into()); self.value.status = Some(status_code); self.value.server_timestamp = Some(server_timestamp.clone()); @@ -501,12 +647,14 @@ impl Variable { /// Test if the variable is user readable. pub fn is_user_readable(&self) -> bool { - self.user_access_level().contains(UserAccessLevel::CURRENT_READ) + self.user_access_level() + .contains(UserAccessLevel::CURRENT_READ) } /// Test if the variable is user writable. pub fn is_user_writable(&self) -> bool { - self.user_access_level().contains(UserAccessLevel::CURRENT_WRITE) + self.user_access_level() + .contains(UserAccessLevel::CURRENT_WRITE) } /// Returns the user access level of the variable. 
@@ -547,7 +695,10 @@ impl Variable { self.data_type.clone() } - pub fn set_data_type(&mut self, data_type: T) where T: Into { + pub fn set_data_type(&mut self, data_type: T) + where + T: Into, + { self.data_type = data_type.into(); } -} \ No newline at end of file +} diff --git a/server/src/address_space/variable_type.rs b/server/src/address_space/variable_type.rs index 66451e4e7..9481aa4b0 100644 --- a/server/src/address_space/variable_type.rs +++ b/server/src/address_space/variable_type.rs @@ -42,19 +42,38 @@ impl Default for VariableType { node_base_impl!(VariableType); impl Node for VariableType { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::Value => self.value(), AttributeId::DataType => Some(self.data_type().into()), AttributeId::IsAbstract => Some(self.is_abstract().into()), AttributeId::ValueRank => Some(self.value_rank().into()), // Optional attributes - AttributeId::ArrayDimensions => self.array_dimensions().map(|v| DataValue::value_only(v)), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + AttributeId::ArrayDimensions => { + self.array_dimensions().map(|v| DataValue::value_only(v)) + } + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::DataType => { if let Variant::NodeId(v) = value { @@ -93,15 +112,23 @@ impl Node for VariableType { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => self.base.set_attribute(attribute_id, value), } } } impl VariableType { - pub fn new(node_id: &NodeId, browse_name: R, display_name: S, data_type: NodeId, is_abstract: bool, value_rank: i32) -> VariableType - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + data_type: NodeId, + is_abstract: bool, + value_rank: i32, + ) -> VariableType + where + R: Into, + S: Into, { VariableType { base: Base::new(NodeClass::VariableType, node_id, browse_name, display_name), @@ -113,15 +140,28 @@ impl VariableType { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: VariableTypeAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: VariableTypeAttributes, + ) -> Result + where + S: Into, { - let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::IS_ABSTRACT | - AttributesMask::DATA_TYPE | AttributesMask::VALUE_RANK; + let mandatory_attributes = AttributesMask::DISPLAY_NAME + | AttributesMask::IS_ABSTRACT + | AttributesMask::DATA_TYPE + | AttributesMask::VALUE_RANK; let mask = AttributesMask::from_bits(attributes.specified_attributes).ok_or(())?; if mask.contains(mandatory_attributes) { - let mut node = Self::new(node_id, browse_name, attributes.display_name, - attributes.data_type, attributes.is_abstract, attributes.value_rank); + let mut node = Self::new( + 
node_id, + browse_name, + attributes.display_name, + attributes.data_type, + attributes.is_abstract, + attributes.value_rank, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } @@ -152,7 +192,10 @@ impl VariableType { self.data_type.clone() } - pub fn set_data_type(&mut self, data_type: T) where T: Into { + pub fn set_data_type(&mut self, data_type: T) + where + T: Into, + { self.data_type = data_type.into(); } @@ -184,7 +227,10 @@ impl VariableType { self.value.clone() } - pub fn set_value(&mut self, value: V) where V: Into { + pub fn set_value(&mut self, value: V) + where + V: Into, + { self.value = Some(DataValue::new_now(value)); } -} \ No newline at end of file +} diff --git a/server/src/address_space/view.rs b/server/src/address_space/view.rs index a433ed357..93773996c 100644 --- a/server/src/address_space/view.rs +++ b/server/src/address_space/view.rs @@ -6,10 +6,7 @@ use opcua_types::service_types::ViewAttributes; -use crate::address_space::{ - base::Base, - EventNotifier, node::Node, node::NodeBase, -}; +use crate::address_space::{base::Base, node::Node, node::NodeBase, EventNotifier}; node_builder_impl!(ViewBuilder, View); @@ -48,15 +45,32 @@ impl Default for View { node_base_impl!(View); impl Node for View { - fn get_attribute_max_age(&self, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Option { + fn get_attribute_max_age( + &self, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Option { match attribute_id { AttributeId::EventNotifier => Some(Variant::from(self.event_notifier().bits()).into()), AttributeId::ContainsNoLoops => Some(Variant::from(self.contains_no_loops()).into()), - _ => self.base.get_attribute_max_age(timestamps_to_return, attribute_id, index_range, data_encoding, max_age) + _ => self.base.get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + data_encoding, + max_age, + ), } } - fn set_attribute(&mut self, attribute_id: AttributeId, value: Variant) -> Result<(), StatusCode> { + fn set_attribute( + &mut self, + attribute_id: AttributeId, + value: Variant, + ) -> Result<(), StatusCode> { match attribute_id { AttributeId::EventNotifier => { if let Variant::Byte(v) = value { @@ -74,15 +88,22 @@ impl Node for View { Err(StatusCode::BadTypeMismatch) } } - _ => self.base.set_attribute(attribute_id, value) + _ => self.base.set_attribute(attribute_id, value), } } } impl View { - pub fn new(node_id: &NodeId, browse_name: R, display_name: S, event_notifier: EventNotifier, contains_no_loops: bool) -> View - where R: Into, - S: Into, + pub fn new( + node_id: &NodeId, + browse_name: R, + display_name: S, + event_notifier: EventNotifier, + contains_no_loops: bool, + ) -> View + where + R: Into, + S: Into, { View { base: Base::new(NodeClass::View, node_id, browse_name, display_name), @@ -91,14 +112,27 @@ impl View { } } - pub fn from_attributes(node_id: &NodeId, browse_name: S, attributes: ViewAttributes) -> Result - where S: Into + pub fn from_attributes( + node_id: &NodeId, + browse_name: S, + attributes: ViewAttributes, + ) -> Result + where + S: Into, { - let mandatory_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::EVENT_NOTIFIER | AttributesMask::CONTAINS_NO_LOOPS; + let mandatory_attributes = AttributesMask::DISPLAY_NAME + | AttributesMask::EVENT_NOTIFIER + | 
AttributesMask::CONTAINS_NO_LOOPS; let mask = AttributesMask::from_bits_truncate(attributes.specified_attributes); if mask.contains(mandatory_attributes) { let event_notifier = EventNotifier::from_bits_truncate(attributes.event_notifier); - let mut node = Self::new(node_id, browse_name, attributes.display_name, event_notifier, attributes.contains_no_loops); + let mut node = Self::new( + node_id, + browse_name, + attributes.display_name, + event_notifier, + attributes.contains_no_loops, + ); if mask.contains(AttributesMask::DESCRIPTION) { node.set_description(attributes.description); } diff --git a/server/src/builder.rs b/server/src/builder.rs index 074007f21..571387294 100644 --- a/server/src/builder.rs +++ b/server/src/builder.rs @@ -7,8 +7,8 @@ use std::path::PathBuf; use opcua_core::config::Config; use crate::{ - constants, config::{ServerConfig, ServerEndpoint, ServerUserToken, ANONYMOUS_USER_TOKEN_ID}, + constants, server::Server, }; @@ -26,7 +26,7 @@ pub struct ServerBuilder { impl ServerBuilder { pub fn new() -> Self { Self { - config: ServerConfig::default() + config: ServerConfig::default(), } } @@ -36,14 +36,18 @@ impl ServerBuilder { } /// Creates a simple endpoint that accepts anonymous connections - pub fn new_anonymous(application_name: T) -> Self where T: Into { + pub fn new_anonymous(application_name: T) -> Self + where + T: Into, + { let user_token_ids = vec![ANONYMOUS_USER_TOKEN_ID.to_string()]; Self::new() .application_name(application_name) - .endpoint("none", ServerEndpoint::new_none(DEFAULT_ENDPOINT_PATH, &user_token_ids)) - .discovery_urls(vec![ - DEFAULT_ENDPOINT_PATH.into() - ]) + .endpoint( + "none", + ServerEndpoint::new_none(DEFAULT_ENDPOINT_PATH, &user_token_ids), + ) + .discovery_urls(vec![DEFAULT_ENDPOINT_PATH.into()]) } /// Creates and yields a builder which is configured with the sample server configuration. 
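For reference, the `new_anonymous` constructor reformatted above is the smallest useful entry point into this builder. A caller-side sketch using only methods touched in this file (hypothetical application name, host and port; turning the builder into a running `Server` is omitted):

use opcua_server::prelude::*;

// Sketch only: an anonymous-access server builder.
fn demo_builder() -> ServerBuilder {
    ServerBuilder::new_anonymous("Anonymous Demo Server")
        .host_and_port("127.0.0.1", 4855)
        .pki_dir("./pki")
}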
@@ -53,8 +57,14 @@ impl ServerBuilder { let path = DEFAULT_ENDPOINT_PATH; - let user_token_ids = ["sample_password_user", "sample_x509_user", ANONYMOUS_USER_TOKEN_ID] - .iter().map(|u| u.to_string()).collect::>(); + let user_token_ids = [ + "sample_password_user", + "sample_x509_user", + ANONYMOUS_USER_TOKEN_ID, + ] + .iter() + .map(|u| u.to_string()) + .collect::>(); Self::new() .application_name("OPC UA Sample Server") @@ -65,41 +75,78 @@ impl ServerBuilder { .private_key_path("private/private.pem") .pki_dir("./pki") .discovery_server_url(Some(constants::DEFAULT_DISCOVERY_SERVER_URL.to_string())) - .user_token("sample_password_user", ServerUserToken { - user: "sample1".to_string(), - pass: Some("sample1pwd".to_string()), - x509: None, - thumbprint: None, - }) - .user_token("sample_x509_user", ServerUserToken { - user: "sample_x509".to_string(), - pass: None, - x509: Some("./users/sample-x509.der".to_string()), - thumbprint: None, - }) - .user_token("unused_user", ServerUserToken { - user: "unused".to_string(), - pass: Some("unused1".to_string()), - x509: None, - thumbprint: None, - }) + .user_token( + "sample_password_user", + ServerUserToken { + user: "sample1".to_string(), + pass: Some("sample1pwd".to_string()), + x509: None, + thumbprint: None, + }, + ) + .user_token( + "sample_x509_user", + ServerUserToken { + user: "sample_x509".to_string(), + pass: None, + x509: Some("./users/sample-x509.der".to_string()), + thumbprint: None, + }, + ) + .user_token( + "unused_user", + ServerUserToken { + user: "unused".to_string(), + pass: Some("unused1".to_string()), + x509: None, + thumbprint: None, + }, + ) .endpoints(vec![ ("none", ServerEndpoint::new_none(path, &user_token_ids)), - ("basic128rsa15_sign", ServerEndpoint::new_basic128rsa15_sign(path, &user_token_ids)), - ("basic128rsa15_sign_encrypt", ServerEndpoint::new_basic128rsa15_sign_encrypt(path, &user_token_ids)), - ("aes128-sha256-rsaoaep_sign", ServerEndpoint::new_aes128_sha256_rsaoaep_sign(path, &user_token_ids)), - ("aes128-sha256-rsaoaep_sign_encrypt", ServerEndpoint::new_aes128_sha256_rsaoaep_sign_encrypt(path, &user_token_ids)), - ("aes256-sha256-rsapss_sign", ServerEndpoint::new_aes256_sha256_rsapss_sign(path, &user_token_ids)), - ("aes256-sha256-rsapss_sign_encrypt", ServerEndpoint::new_aes256_sha256_rsapss_sign_encrypt(path, &user_token_ids)), - ("basic256_sign", ServerEndpoint::new_basic256_sign(path, &user_token_ids)), - ("basic256_sign_encrypt", ServerEndpoint::new_basic256_sign_encrypt(path, &user_token_ids)), - ("basic256sha256_sign", ServerEndpoint::new_basic256sha256_sign(path, &user_token_ids)), - ("basic256sha256_sign_encrypt", ServerEndpoint::new_basic256sha256_sign_encrypt(path, &user_token_ids)), - ("no_access", ServerEndpoint::new_none("/noaccess", &[])) - ]) - .discovery_urls(vec![ - DEFAULT_ENDPOINT_PATH.into() + ( + "basic128rsa15_sign", + ServerEndpoint::new_basic128rsa15_sign(path, &user_token_ids), + ), + ( + "basic128rsa15_sign_encrypt", + ServerEndpoint::new_basic128rsa15_sign_encrypt(path, &user_token_ids), + ), + ( + "aes128-sha256-rsaoaep_sign", + ServerEndpoint::new_aes128_sha256_rsaoaep_sign(path, &user_token_ids), + ), + ( + "aes128-sha256-rsaoaep_sign_encrypt", + ServerEndpoint::new_aes128_sha256_rsaoaep_sign_encrypt(path, &user_token_ids), + ), + ( + "aes256-sha256-rsapss_sign", + ServerEndpoint::new_aes256_sha256_rsapss_sign(path, &user_token_ids), + ), + ( + "aes256-sha256-rsapss_sign_encrypt", + ServerEndpoint::new_aes256_sha256_rsapss_sign_encrypt(path, &user_token_ids), + ), + ( + 
"basic256_sign", + ServerEndpoint::new_basic256_sign(path, &user_token_ids), + ), + ( + "basic256_sign_encrypt", + ServerEndpoint::new_basic256_sign_encrypt(path, &user_token_ids), + ), + ( + "basic256sha256_sign", + ServerEndpoint::new_basic256sha256_sign(path, &user_token_ids), + ), + ( + "basic256sha256_sign_encrypt", + ServerEndpoint::new_basic256sha256_sign_encrypt(path, &user_token_ids), + ), + ("no_access", ServerEndpoint::new_none("/noaccess", &[])), ]) + .discovery_urls(vec![DEFAULT_ENDPOINT_PATH.into()]) } /// Yields a [`Client`] from the values set by the builder. If the builder is not in a valid state @@ -127,19 +174,28 @@ impl ServerBuilder { } /// Sets the application name. - pub fn application_name(mut self, application_name: T) -> Self where T: Into { + pub fn application_name(mut self, application_name: T) -> Self + where + T: Into, + { self.config.application_name = application_name.into(); self } /// Sets the application uri - pub fn application_uri(mut self, application_uri: T) -> Self where T: Into { + pub fn application_uri(mut self, application_uri: T) -> Self + where + T: Into, + { self.config.application_uri = application_uri.into(); self } /// Sets the product uri. - pub fn product_uri(mut self, product_uri: T) -> Self where T: Into { + pub fn product_uri(mut self, product_uri: T) -> Self + where + T: Into, + { self.config.product_uri = product_uri.into(); self } @@ -154,7 +210,10 @@ impl ServerBuilder { /// Sets a custom server certificate path. The path is required to be provided as a partial /// path relative to the PKI directory. If set, this path will be used to read the server /// certificate from disk. The certificate can be in either the .der or .pem format. - pub fn certificate_path(mut self, certificate_path: T) -> Self where T: Into { + pub fn certificate_path(mut self, certificate_path: T) -> Self + where + T: Into, + { self.config.certificate_path = Some(certificate_path.into()); self } @@ -162,35 +221,52 @@ impl ServerBuilder { /// Sets a custom private key path. The path is required to be provided as a partial path /// relative to the PKI directory. If set, this path will be used to read the private key /// from disk. - pub fn private_key_path(mut self, private_key_path: T) -> Self where T: Into { + pub fn private_key_path(mut self, private_key_path: T) -> Self + where + T: Into, + { self.config.private_key_path = Some(private_key_path.into()); self } /// Sets the pki directory where client's own key pair is stored and where `/trusted` and /// `/rejected` server certificates are stored. - pub fn pki_dir(mut self, pki_dir: T) -> Self where T: Into { + pub fn pki_dir(mut self, pki_dir: T) -> Self + where + T: Into, + { self.config.pki_dir = pki_dir.into(); self } /// Adds an endpoint to the list of endpoints the client knows of. - pub fn endpoint(mut self, endpoint_id: T, endpoint: ServerEndpoint) -> Self where T: Into { + pub fn endpoint(mut self, endpoint_id: T, endpoint: ServerEndpoint) -> Self + where + T: Into, + { self.config.endpoints.insert(endpoint_id.into(), endpoint); self } /// Adds multiple endpoints to the list of endpoints the client knows of. - pub fn endpoints(mut self, endpoints: Vec<(T, ServerEndpoint)>) -> Self where T: Into { + pub fn endpoints(mut self, endpoints: Vec<(T, ServerEndpoint)>) -> Self + where + T: Into, + { for e in endpoints { self.config.endpoints.insert(e.0.into(), e.1); - }; + } self } /// Adds a user token to the server. 
- pub fn user_token(mut self, user_token_id: T, user_token: ServerUserToken) -> Self where T: Into { - self.config.user_tokens.insert(user_token_id.into(), user_token); + pub fn user_token(mut self, user_token_id: T, user_token: ServerUserToken) -> Self + where + T: Into, + { + self.config + .user_tokens + .insert(user_token_id.into(), user_token); self } @@ -201,7 +277,10 @@ impl ServerBuilder { } /// Sets the hostname and port to listen on - pub fn host_and_port(mut self, host: T, port: u16) -> Self where T: Into { + pub fn host_and_port(mut self, host: T, port: u16) -> Self + where + T: Into, + { self.config.tcp_config.host = host.into(); self.config.tcp_config.port = port; self @@ -211,14 +290,20 @@ impl ServerBuilder { /// If the url is relative, e.g. "/" then the code will make a url for you using the port/host /// settings as they are at the time this function is executed. pub fn discovery_urls(mut self, discovery_urls: Vec) -> Self { - self.config.discovery_urls = discovery_urls.iter().map(|discovery_url| { - if discovery_url.starts_with("/") { - // Turn into an opc url - format!("opc.tcp://{}:{}/", self.config.tcp_config.host, self.config.tcp_config.port) - } else { - discovery_url.clone() - } - }).collect(); + self.config.discovery_urls = discovery_urls + .iter() + .map(|discovery_url| { + if discovery_url.starts_with("/") { + // Turn into an opc url + format!( + "opc.tcp://{}:{}/", + self.config.tcp_config.host, self.config.tcp_config.port + ) + } else { + discovery_url.clone() + } + }) + .collect(); self } diff --git a/server/src/callbacks.rs b/server/src/callbacks.rs index 78c887711..f79976270 100644 --- a/server/src/callbacks.rs +++ b/server/src/callbacks.rs @@ -7,10 +7,9 @@ use std::sync::{Arc, RwLock}; use opcua_types::{ - AttributeId, DataValue, NodeId, - NumericRange, QualifiedName, service_types::{CallMethodRequest, CallMethodResult, TimestampsToReturn}, status_code::StatusCode, + AttributeId, DataValue, NodeId, NumericRange, QualifiedName, }; use crate::session::Session; @@ -31,13 +30,27 @@ use crate::session::Session; /// pub trait AttributeGetter { /// Returns a data value of the specified attribute or none. - fn get(&mut self, node_id: &NodeId, timestamps_to_return: TimestampsToReturn, attribute_id: AttributeId, index_range: NumericRange, data_encoding: &QualifiedName, max_age: f64) -> Result, StatusCode>; + fn get( + &mut self, + node_id: &NodeId, + timestamps_to_return: TimestampsToReturn, + attribute_id: AttributeId, + index_range: NumericRange, + data_encoding: &QualifiedName, + max_age: f64, + ) -> Result, StatusCode>; } // An attribute setter. Sets the value on the specified attribute pub trait AttributeSetter { /// Sets the attribute on the specified node - fn set(&mut self, node_id: &NodeId, attribute_id: AttributeId, index_range: NumericRange, data_value: DataValue) -> Result<(), StatusCode>; + fn set( + &mut self, + node_id: &NodeId, + attribute_id: AttributeId, + index_range: NumericRange, + data_value: DataValue, + ) -> Result<(), StatusCode>; } /// Called by RegisterNodes service @@ -51,7 +64,11 @@ pub trait RegisterNodes { /// /// There is no guarantee that the corresponding `OnUnregisterNodes` will be called by the client, /// therefore use the weak session references and a periodic check to perform any housekeeping. 
- fn register_nodes(&mut self, session: Arc>, nodes_to_register: &[NodeId]) -> Result, StatusCode>; + fn register_nodes( + &mut self, + session: Arc>, + nodes_to_register: &[NodeId], + ) -> Result, StatusCode>; } /// Called by UnregisterNodes service @@ -62,7 +79,11 @@ pub trait UnregisterNodes { /// /// The function should not validate the nodes in the request and should just ignore any /// unregistered nodes. - fn unregister_nodes(&mut self, session: Arc>, nodes_to_unregister: &[NodeId]) -> Result<(), StatusCode>; + fn unregister_nodes( + &mut self, + session: Arc>, + nodes_to_unregister: &[NodeId], + ) -> Result<(), StatusCode>; } /// Called by the Method service when it invokes a method @@ -70,5 +91,9 @@ pub trait Method { /// A method is registered via the address space to a method id and optionally an object id. /// When a client sends a CallRequest / CallMethod request, the registered object will /// be invoked to handle the call. - fn call(&mut self, session: &mut Session, request: &CallMethodRequest) -> Result; + fn call( + &mut self, + session: &mut Session, + request: &CallMethodRequest, + ) -> Result; } diff --git a/server/src/comms/mod.rs b/server/src/comms/mod.rs index b23452a7b..602236990 100644 --- a/server/src/comms/mod.rs +++ b/server/src/comms/mod.rs @@ -7,5 +7,5 @@ mod secure_channel_service; -pub mod transport; pub mod tcp_transport; +pub mod transport; diff --git a/server/src/comms/secure_channel_service.rs b/server/src/comms/secure_channel_service.rs index ea4f297a7..e7c366254 100644 --- a/server/src/comms/secure_channel_service.rs +++ b/server/src/comms/secure_channel_service.rs @@ -4,13 +4,10 @@ use std::result::Result; -use opcua_core::{ - comms::prelude::*, - supported_message::SupportedMessage -}; +use opcua_core::{comms::prelude::*, supported_message::SupportedMessage}; use opcua_crypto::SecurityPolicy; -use opcua_types::{*, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, *}; struct SecureChannelState { // Issued flag @@ -56,22 +53,29 @@ impl SecureChannelService { } } - pub fn open_secure_channel(&mut self, secure_channel: &mut SecureChannel, security_header: &SecurityHeader, client_protocol_version: u32, message: &SupportedMessage) -> Result { + pub fn open_secure_channel( + &mut self, + secure_channel: &mut SecureChannel, + security_header: &SecurityHeader, + client_protocol_version: u32, + message: &SupportedMessage, + ) -> Result { let request = match message { SupportedMessage::OpenSecureChannelRequest(request) => { trace!("Got secure channel request {:?}", request); request } _ => { - error!("message is not an open secure channel request, got {:?}", message); + error!( + "message is not an open secure channel request, got {:?}", + message + ); return Err(StatusCode::BadUnexpectedError); } }; let security_header = match security_header { - SecurityHeader::Asymmetric(security_header) => { - security_header - } + SecurityHeader::Asymmetric(security_header) => security_header, _ => { error!("Secure channel request message does not have asymmetric security header"); return Err(StatusCode::BadUnexpectedError); @@ -80,8 +84,15 @@ impl SecureChannelService { // Must compare protocol version to the one from HELLO if request.client_protocol_version != client_protocol_version { - error!("Client sent a different protocol version than it did in the HELLO - {} vs {}", request.client_protocol_version, client_protocol_version); - return Ok(ServiceFault::new(&request.request_header, StatusCode::BadProtocolVersionUnsupported).into()); + error!( + 
"Client sent a different protocol version than it did in the HELLO - {} vs {}", + request.client_protocol_version, client_protocol_version + ); + return Ok(ServiceFault::new( + &request.request_header, + StatusCode::BadProtocolVersionUnsupported, + ) + .into()); } // Test the request type @@ -99,10 +110,15 @@ impl SecureChannelService { // Check for a duplicate nonce. It is invalid for the renew to use the same nonce // as was used for last issue/renew. It doesn't matter when policy is none. - if secure_channel.security_policy() != SecurityPolicy::None && - request.client_nonce.as_ref() == &secure_channel.remote_nonce()[..] { + if secure_channel.security_policy() != SecurityPolicy::None + && request.client_nonce.as_ref() == &secure_channel.remote_nonce()[..] + { error!("Client reused a nonce for a renew"); - return Ok(ServiceFault::new(&request.request_header, StatusCode::BadNonceInvalid).into()); + return Ok(ServiceFault::new( + &request.request_header, + StatusCode::BadNonceInvalid, + ) + .into()); } // check to see if the secure channel has been issued before or not @@ -118,12 +134,18 @@ impl SecureChannelService { // Check the requested security mode debug!("Message security mode == {:?}", request.security_mode); match request.security_mode { - MessageSecurityMode::None | MessageSecurityMode::Sign | MessageSecurityMode::SignAndEncrypt => { + MessageSecurityMode::None + | MessageSecurityMode::Sign + | MessageSecurityMode::SignAndEncrypt => { // TODO validate NONCE } _ => { error!("Security mode is invalid"); - return Ok(ServiceFault::new(&request.request_header, StatusCode::BadSecurityModeRejected).into()); + return Ok(ServiceFault::new( + &request.request_header, + StatusCode::BadSecurityModeRejected, + ) + .into()); } } @@ -142,11 +164,16 @@ impl SecureChannelService { secure_channel.create_random_nonce(); } else { error!("Was unable to set their nonce, check logic"); - return Ok(ServiceFault::new(&request.request_header, nonce_result.unwrap_err()).into()); + return Ok( + ServiceFault::new(&request.request_header, nonce_result.unwrap_err()).into(), + ); } let security_policy = secure_channel.security_policy(); - if security_policy != SecurityPolicy::None && (security_mode == MessageSecurityMode::Sign || security_mode == MessageSecurityMode::SignAndEncrypt) { + if security_policy != SecurityPolicy::None + && (security_mode == MessageSecurityMode::Sign + || security_mode == MessageSecurityMode::SignAndEncrypt) + { secure_channel.derive_keys(); } @@ -164,8 +191,11 @@ impl SecureChannelService { Ok(response.into()) } - pub fn close_secure_channel(&mut self, _: &SupportedMessage) -> Result { + pub fn close_secure_channel( + &mut self, + _: &SupportedMessage, + ) -> Result { info!("CloseSecureChannelRequest received, session closing"); Err(StatusCode::BadConnectionClosed) } -} \ No newline at end of file +} diff --git a/server/src/comms/tcp_transport.rs b/server/src/comms/tcp_transport.rs index 020c93776..874e26795 100644 --- a/server/src/comms/tcp_transport.rs +++ b/server/src/comms/tcp_transport.rs @@ -18,13 +18,16 @@ use std::time::{Duration, Instant}; use chrono; use chrono::Utc; use futures::{ - Future, future, - Stream, + future, sync::mpsc::{self, unbounded, UnboundedReceiver, UnboundedSender}, + Future, Stream, }; use tokio::{self, net::TcpStream}; use tokio_codec::FramedRead; -use tokio_io::{AsyncRead, AsyncWrite, io::{self, ReadHalf, WriteHalf}}; +use tokio_io::{ + io::{self, ReadHalf, WriteHalf}, + AsyncRead, AsyncWrite, +}; use tokio_timer::Interval; use opcua_core::{ @@ -38,18 
+41,13 @@ use opcua_core::{ prelude::*, RUNTIME, }; -use opcua_types::{status_code::StatusCode}; +use opcua_types::status_code::StatusCode; use crate::{ - address_space::types::AddressSpace, - comms::secure_channel_service::SecureChannelService, - comms::transport::*, - constants, - services::message_handler::MessageHandler, - session::Session, - state::ServerState, + address_space::types::AddressSpace, comms::secure_channel_service::SecureChannelService, + comms::transport::*, constants, services::message_handler::MessageHandler, session::Session, + state::ServerState, subscriptions::subscription::TickReason, subscriptions::PublishResponseEntry, - subscriptions::subscription::TickReason, }; // TODO these need to go, and use session settings @@ -58,17 +56,15 @@ const SEND_BUFFER_SIZE: usize = std::u16::MAX as usize; const MAX_MESSAGE_SIZE: usize = std::u16::MAX as usize; macro_rules! connection_finished_test { - ( $id: expr, $connection:expr ) => { - { - trace!("{}", $id); - let connection = trace_read_lock_unwrap!($connection); - let finished = connection.is_finished(); - if finished { - info!("{} connection finished", $id); - } - future::ok(!finished) + ( $id: expr, $connection:expr ) => {{ + trace!("{}", $id); + let connection = trace_read_lock_unwrap!($connection); + let finished = connection.is_finished(); + if finished { + info!("{} connection finished", $id); } - } + future::ok(!finished) + }}; } /// Messages that may be sent to the writer. @@ -81,7 +77,7 @@ enum Message { #[derive(Clone)] pub struct MessageSender { - sender: UnboundedSender + sender: UnboundedSender, } impl MessageSender { @@ -90,7 +86,9 @@ impl MessageSender { } pub fn send_message(&self, request_id: u32, message: SupportedMessage) { - let _ = self.sender.unbounded_send(Message::Message(request_id, message)); + let _ = self + .sender + .unbounded_send(Message::Message(request_id, message)); } } @@ -153,7 +151,10 @@ impl Transport for TcpTransport { // Terminates the connection and the session fn finish(&mut self, status_code: StatusCode) { if !self.is_finished() { - debug!("Transport is being placed in finished state, code {}", status_code); + debug!( + "Transport is being placed in finished state, code {}", + status_code + ); self.transport_state = TransportState::Finished(status_code); let mut session = trace_write_lock_unwrap!(self.session); session.set_terminated(); @@ -181,7 +182,12 @@ impl Transport for TcpTransport { } impl TcpTransport { - pub fn new(server_state: Arc>, session: Arc>, address_space: Arc>, message_handler: MessageHandler) -> TcpTransport { + pub fn new( + server_state: Arc>, + session: Arc>, + address_space: Arc>, + message_handler: MessageHandler, + ) -> TcpTransport { let (secure_channel, session_id) = { let session = trace_read_lock_unwrap!(session); (session.secure_channel(), session.session_id().clone()) @@ -206,28 +212,29 @@ impl TcpTransport { /// This is the entry point for the session. This function is asynchronous - it spawns tokio /// tasks to handle the session execution loop so this function will returns immediately. 
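The doc comment above describes `run` as the per-connection entry point. A sketch of the listener side that feeds it (the `Arc<RwLock<..>>` wrappers and the 100 ms looping interval are assumptions for illustration; types mirror this file's imports):

use std::sync::{Arc, RwLock};

use tokio::net::TcpStream;

use crate::{
    address_space::types::AddressSpace, comms::tcp_transport::TcpTransport,
    services::message_handler::MessageHandler, session::Session, state::ServerState,
};

// Sketch only: wrap a freshly built transport and let `run` spawn the
// reader/writer/subscription tasks for this socket.
fn accept_connection(
    server_state: Arc<RwLock<ServerState>>,
    session: Arc<RwLock<Session>>,
    address_space: Arc<RwLock<AddressSpace>>,
    message_handler: MessageHandler,
    socket: TcpStream,
) {
    let transport = Arc::new(RwLock::new(TcpTransport::new(
        server_state,
        session,
        address_space,
        message_handler,
    )));
    TcpTransport::run(transport, socket, 100.0);
}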
pub fn run(connection: Arc>, socket: TcpStream, looping_interval_ms: f64) { - info!("Socket info:\n Linger - {}\n Keepalive - {},\n TTL - {}", - if let Ok(v) = socket.linger() { - match v { - Some(d) => format!("{}ms", d.as_millis()), - None => "No linger".to_string(), - } - } else { - "No Linger (err)".to_string() - }, - if let Ok(v) = socket.keepalive() { - match v { - Some(d) => format!("{}ms", d.as_millis()), - None => "No Keepalive".to_string(), - } - } else { - "No Keepalive (err)".to_string() - }, - if let Ok(v) = socket.ttl() { - format!("{}", v) - } else { - "No TTL".to_string() - } + info!( + "Socket info:\n Linger - {}\n Keepalive - {},\n TTL - {}", + if let Ok(v) = socket.linger() { + match v { + Some(d) => format!("{}ms", d.as_millis()), + None => "No linger".to_string(), + } + } else { + "No Linger (err)".to_string() + }, + if let Ok(v) = socket.keepalive() { + match v { + Some(d) => format!("{}ms", d.as_millis()), + None => "No Keepalive".to_string(), + } + } else { + "No Keepalive (err)".to_string() + }, + if let Ok(v) = socket.ttl() { + format!("{}", v) + } else { + "No TTL".to_string() + } ); // Store the address of the client @@ -240,7 +247,9 @@ impl TcpTransport { Self::spawn_looping_task(connection, socket, looping_interval_ms); } - fn write_bytes_task(connection: Arc>) -> impl Future>, Error=Arc>> { + fn write_bytes_task( + connection: Arc>, + ) -> impl Future>, Error = Arc>> { let (writer, bytes_to_write, transport) = { let mut connection = trace_lock_unwrap!(connection); let writer = connection.writer.take(); @@ -265,12 +274,15 @@ impl TcpTransport { connection.writer = Some(writer); } connection - }).map_err(move |_| { - connection_for_err - }) + }) + .map_err(move |_| connection_for_err) } - fn spawn_looping_task(transport: Arc>, socket: TcpStream, looping_interval_ms: f64) { + fn spawn_looping_task( + transport: Arc>, + socket: TcpStream, + looping_interval_ms: f64, + ) { let session_start_time = Utc::now(); info!("Session started {}", session_start_time); @@ -297,8 +309,20 @@ impl TcpTransport { // reading and writing. Self::spawn_subscriptions_task(transport.clone(), tx.clone(), looping_interval_ms); Self::spawn_finished_monitor_task(transport.clone(), finished_flag.clone()); - Self::spawn_writing_loop_task(writer, rx, secure_channel.clone(), transport.clone(), send_buffer); - Self::spawn_reading_loop_task(reader, finished_flag.clone(), transport.clone(), tx, receive_buffer_size); + Self::spawn_writing_loop_task( + writer, + rx, + secure_channel.clone(), + transport.clone(), + send_buffer, + ); + Self::spawn_reading_loop_task( + reader, + finished_flag.clone(), + transport.clone(), + tx, + receive_buffer_size, + ); } fn make_session_id(component: &str, transport: Arc>) -> String { @@ -309,40 +333,52 @@ impl TcpTransport { /// Spawns the finished monitor task. This checks for the session to be in a finished /// state and ensures the session is placed into a finished state once the transport /// aborts or finishes. 
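The finished-monitor task spawned in the next hunk follows the same Interval + take_while + for_each shape as every other periodic task in this file. Stripped to its skeleton (a sketch using a plain `AtomicBool` in place of the shared transport state, and the same futures 0.1 / tokio-timer APIs already imported above):

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

use futures::{future, Future, Stream};
use tokio_timer::Interval;

// Sketch only: poll every 100 ms until the shared flag flips, then finish.
fn spawn_monitor(stop: Arc<AtomicBool>) {
    let task = Interval::new(Instant::now(), Duration::from_millis(100))
        .take_while(move |_| future::ok(!stop.load(Ordering::Relaxed)))
        .for_each(|_| Ok(()))
        .map(|_| println!("monitor finished"))
        .map_err(|err| eprintln!("monitor error {:?}", err));
    tokio::spawn(task);
}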
- fn spawn_finished_monitor_task(transport: Arc>, finished_flag: Arc>) { + fn spawn_finished_monitor_task( + transport: Arc>, + finished_flag: Arc>, + ) { let id = Self::make_session_id("finished_monitor_task", transport.clone()); let id_for_map = id.clone(); let id_for_map_err = id.clone(); register_runtime_component!(id); - let finished_monitor_task = Interval::new(Instant::now(), Duration::from_millis(constants::HELLO_TIMEOUT_POLL_MS)) - .take_while(move |_| { - trace!("finished_monitor_task.take_while"); - let (is_server_abort, is_finished) = { - let transport = trace_read_lock_unwrap!(transport); - (transport.is_server_abort(), transport.is_finished()) - }; - if !is_finished && is_server_abort { - let mut finished_flag = trace_write_lock_unwrap!(finished_flag); - *finished_flag = true; - } - future::ok(!is_server_abort && !is_finished) - }) - .for_each(move |_| Ok(())) - .map(|_| { - info!("Finished monitor task is finished"); - deregister_runtime_component!(id_for_map); - }) - .map_err(move |err| { - error!("Finished monitor task is finished with an error {:?}", err); - deregister_runtime_component!(id_for_map_err); - }); + let finished_monitor_task = Interval::new( + Instant::now(), + Duration::from_millis(constants::HELLO_TIMEOUT_POLL_MS), + ) + .take_while(move |_| { + trace!("finished_monitor_task.take_while"); + let (is_server_abort, is_finished) = { + let transport = trace_read_lock_unwrap!(transport); + (transport.is_server_abort(), transport.is_finished()) + }; + if !is_finished && is_server_abort { + let mut finished_flag = trace_write_lock_unwrap!(finished_flag); + *finished_flag = true; + } + future::ok(!is_server_abort && !is_finished) + }) + .for_each(move |_| Ok(())) + .map(|_| { + info!("Finished monitor task is finished"); + deregister_runtime_component!(id_for_map); + }) + .map_err(move |err| { + error!("Finished monitor task is finished with an error {:?}", err); + deregister_runtime_component!(id_for_map_err); + }); tokio::spawn(finished_monitor_task); } /// Spawns the writing loop task. The writing loop takes messages to send off of a queue /// and sends them to the stream. 
- fn spawn_writing_loop_task(writer: WriteHalf, receiver: UnboundedReceiver, secure_channel: Arc>, transport: Arc>, send_buffer: Arc>) { + fn spawn_writing_loop_task( + writer: WriteHalf, + receiver: UnboundedReceiver, + secure_channel: Arc>, + transport: Arc>, + send_buffer: Arc>, + ) { let id = Self::make_session_id("server_writing_loop_task", transport.clone()); let id_for_map = id.clone(); let id_for_map_err = id.clone(); @@ -358,103 +394,111 @@ impl TcpTransport { let connection_for_take_while = connection.clone(); // The writing task waits for messages that are to be sent - let looping_task = receiver.map(move |message| { - (message, connection.clone()) - }).take_while(move |(message, _)| { - trace!("write_looping_task.take_while"); - let mut transport = trace_write_lock_unwrap!(transport); - let take = match message { - Message::Quit => { - debug!("Server writer received a quit so it will quit"); - let mut connection = trace_lock_unwrap!(connection_for_take_while); - if let Some(ref mut writer) = connection.writer { - let _ = writer.shutdown(); + let looping_task = receiver + .map(move |message| (message, connection.clone())) + .take_while(move |(message, _)| { + trace!("write_looping_task.take_while"); + let mut transport = trace_write_lock_unwrap!(transport); + let take = match message { + Message::Quit => { + debug!("Server writer received a quit so it will quit"); + let mut connection = trace_lock_unwrap!(connection_for_take_while); + if let Some(ref mut writer) = connection.writer { + let _ = writer.shutdown(); + } + false } - false - } - Message::Message(_, response) => if let SupportedMessage::Invalid(_) = response { - error!("Writer terminating - received an invalid message"); - transport.finish(StatusCode::BadCommunicationError); - false - } else if transport.is_server_abort() { - info!("Writer terminating - communication error (abort)"); - transport.finish(StatusCode::BadCommunicationError); - false - } else if transport.is_finished() { - info!("Writer terminating - transport is finished"); - false - } else { - true - } - }; - future::ok(take) - }).for_each(move |(message, connection)| { - let (request_id, response) = match message { - Message::Quit => panic!(), - Message::Message(request_id, response) => (request_id, response) - }; - { - let connection = trace_lock_unwrap!(connection); - let mut secure_channel = trace_write_lock_unwrap!(connection.secure_channel); - let mut send_buffer = trace_lock_unwrap!(connection.send_buffer); - match response { - SupportedMessage::AcknowledgeMessage(ack) => { - let _ = send_buffer.write_ack(&ack); + Message::Message(_, response) => { + if let SupportedMessage::Invalid(_) = response { + error!("Writer terminating - received an invalid message"); + transport.finish(StatusCode::BadCommunicationError); + false + } else if transport.is_server_abort() { + info!("Writer terminating - communication error (abort)"); + transport.finish(StatusCode::BadCommunicationError); + false + } else if transport.is_finished() { + info!("Writer terminating - transport is finished"); + false + } else { + true + } } - msg => { - let _ = send_buffer.write(request_id, msg, &mut secure_channel); + }; + future::ok(take) + }) + .for_each(move |(message, connection)| { + let (request_id, response) = match message { + Message::Quit => panic!(), + Message::Message(request_id, response) => (request_id, response), + }; + { + let connection = trace_lock_unwrap!(connection); + let mut secure_channel = trace_write_lock_unwrap!(connection.secure_channel); + let mut 
send_buffer = trace_lock_unwrap!(connection.send_buffer); + match response { + SupportedMessage::AcknowledgeMessage(ack) => { + let _ = send_buffer.write_ack(&ack); + } + msg => { + let _ = send_buffer.write(request_id, msg, &mut secure_channel); + } } } - } - Self::write_bytes_task(connection) - .and_then(|connection| { - let finished = { - let connection = trace_lock_unwrap!(connection); - let transport = trace_read_lock_unwrap!(connection.transport); - transport.is_finished() - }; - if finished { - info!("Writer session status is terminating"); - { - let mut connection = trace_lock_unwrap!(connection); - if let Some(ref mut writer) = connection.writer { - let _ = writer.shutdown(); + Self::write_bytes_task(connection) + .and_then(|connection| { + let finished = { + let connection = trace_lock_unwrap!(connection); + let transport = trace_read_lock_unwrap!(connection.transport); + transport.is_finished() + }; + if finished { + info!("Writer session status is terminating"); + { + let mut connection = trace_lock_unwrap!(connection); + if let Some(ref mut writer) = connection.writer { + let _ = writer.shutdown(); + } } + Err(connection) + } else { + Ok(connection) } - Err(connection) - } else { - Ok(connection) - } - }) - .map(|_| { - trace!("Write bytes task finished"); - }) - .map_err(|connection| { - // Mark as finished just in case something else didn't - let connection = trace_lock_unwrap!(connection); - let mut transport = trace_write_lock_unwrap!(connection.transport); - if !transport.is_finished() { - error!("Write bytes task is in error and is finishing the transport"); - transport.finish(StatusCode::BadCommunicationError); - } else { - error!("Write bytes task is in error"); - }; - }) - }).map(move |_| { - info!("Writer is finished"); - deregister_runtime_component!(id_for_map); - }).map_err(move |err| { - error!("Writer is finished with an error {:?}", err); - deregister_runtime_component!(id_for_map_err); - }); + }) + .map(|_| { + trace!("Write bytes task finished"); + }) + .map_err(|connection| { + // Mark as finished just in case something else didn't + let connection = trace_lock_unwrap!(connection); + let mut transport = trace_write_lock_unwrap!(connection.transport); + if !transport.is_finished() { + error!("Write bytes task is in error and is finishing the transport"); + transport.finish(StatusCode::BadCommunicationError); + } else { + error!("Write bytes task is in error"); + }; + }) + }) + .map(move |_| { + info!("Writer is finished"); + deregister_runtime_component!(id_for_map); + }) + .map_err(move |err| { + error!("Writer is finished with an error {:?}", err); + deregister_runtime_component!(id_for_map_err); + }); tokio::spawn(looping_task); } /// Creates the framed read task / future. This will read chunks from the /// reader and process them. 
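The task defined in the next hunk is where the `FramedRead` import at the top of this file gets used. Reduced to its skeleton (a sketch with a generic `tokio_codec::Decoder` standing in for the crate's own chunk codec, which in the real task feeds `process_chunk`):

use futures::{Future, Stream};
use tokio_codec::{Decoder, FramedRead};
use tokio_io::AsyncRead;

// Sketch only: decode frames off the read half and handle each one in turn.
fn framed_read_sketch<R, C>(reader: R, codec: C) -> impl Future<Item = (), Error = ()>
where
    R: AsyncRead,
    C: Decoder,
    C::Error: std::fmt::Debug,
{
    FramedRead::new(reader, codec)
        .for_each(|_frame| Ok(()))
        .map(|_| println!("reader finished"))
        .map_err(|err| eprintln!("reader error {:?}", err))
}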
- fn framed_read_task(reader: ReadHalf, finished_flag: Arc>, connection: Arc>) -> impl Future - { + fn framed_read_task( + reader: ReadHalf, + finished_flag: Arc>, + connection: Arc>, + ) -> impl Future { let (transport, mut sender) = { let connection = trace_read_lock_unwrap!(connection); (connection.transport.clone(), connection.sender.clone()) @@ -514,7 +558,10 @@ impl TcpTransport { } // Update the session status and drop out if session_status_code.is_bad() { - error!("Server reader session status is {} so finishing", session_status_code); + error!( + "Server reader session status is {} so finishing", + session_status_code + ); let mut transport = trace_write_lock_unwrap!(transport); transport.finish(session_status_code); Err(std::io::ErrorKind::ConnectionReset.into()) @@ -533,7 +580,10 @@ impl TcpTransport { // Mark as finished just in case something else didn't let mut transport = trace_write_lock_unwrap!(transport_for_err); if !transport.is_finished() { - error!("Server reader is in error and is finishing the transport. {:?}", err); + error!( + "Server reader is in error and is finishing the transport. {:?}", + err + ); transport.finish(StatusCode::BadCommunicationError); } else { error!("Server reader error {:?}", err); @@ -543,7 +593,13 @@ impl TcpTransport { /// Spawns the reading loop where a reader task continuously reads messages, chunks from the /// input and process them. The reading task will terminate upon error. - fn spawn_reading_loop_task(reader: ReadHalf, finished_flag: Arc>, transport: Arc>, sender: UnboundedSender, receive_buffer_size: usize) { + fn spawn_reading_loop_task( + reader: ReadHalf, + finished_flag: Arc>, + transport: Arc>, + sender: UnboundedSender, + receive_buffer_size: usize, + ) { // Connection state is maintained for looping through each task let connection = Arc::new(RwLock::new(ReadState { transport: transport.clone(), @@ -603,7 +659,11 @@ impl TcpTransport { /// Makes the tokio task that looks for a hello timeout event, i.e. 
the connection is opened /// but no hello is received and we need to drop the session - fn spawn_hello_timeout_task(transport: Arc>, sender: UnboundedSender, session_start_time: chrono::DateTime) { + fn spawn_hello_timeout_task( + transport: Arc>, + sender: UnboundedSender, + session_start_time: chrono::DateTime, + ) { let id = Self::make_session_id("hello_timeout_task", transport.clone()); let id_for_map = id.clone(); let id_for_map_err = id.clone(); @@ -683,7 +743,11 @@ impl TcpTransport { } /// Start the subscription timer to service subscriptions - fn spawn_subscriptions_task(transport: Arc>, sender: UnboundedSender, looping_interval_ms: f64) { + fn spawn_subscriptions_task( + transport: Arc>, + sender: UnboundedSender, + looping_interval_ms: f64, + ) { /// Subscription events are passed sent from the monitor task to the receiver #[derive(Clone, Debug)] enum SubscriptionEvent { @@ -717,7 +781,10 @@ impl TcpTransport { let interval_duration = Duration::from_millis(looping_interval_ms as u64); let task = Interval::new(Instant::now(), interval_duration) .take_while(move |_| { - connection_finished_test!("subscriptions_task.take_while", transport_for_take_while) + connection_finished_test!( + "subscriptions_task.take_while", + transport_for_take_while + ) }) .for_each(move |_| { let transport = trace_read_lock_unwrap!(state.transport); @@ -731,12 +798,20 @@ impl TcpTransport { // Process subscriptions { let address_space = trace_read_lock_unwrap!(transport.address_space); - let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); + let _ = session.tick_subscriptions( + &now, + &address_space, + TickReason::TickTimerFired, + ); } // Check if there are publish responses to send for transmission - if let Some(publish_responses) = session.subscriptions_mut().take_publish_responses() { - match subscription_tx.unbounded_send(SubscriptionEvent::PublishResponses(publish_responses)) { + if let Some(publish_responses) = + session.subscriptions_mut().take_publish_responses() + { + match subscription_tx + .unbounded_send(SubscriptionEvent::PublishResponses(publish_responses)) + { Err(error) => { error!("Cannot send publish responses, err = {}", error); } @@ -777,32 +852,47 @@ impl TcpTransport { // Clone the connection so the take_while predicate has its own instance let transport_for_take_while = state.transport.clone(); - tokio::spawn(subscription_rx - .take_while(move |_| { - connection_finished_test!("receiving_task.take_while", transport_for_take_while) - }) - .for_each(move |subscription_event| { - // Process publish response events - match subscription_event { - SubscriptionEvent::PublishResponses(publish_responses) => { - trace!("Got {} PublishResponse messages to send", publish_responses.len()); - for publish_response in publish_responses { - trace!("<-- Sending a Publish Response{}, {:?}", publish_response.request_id, &publish_response.response); - // Messages will be sent by the writing task - let _ = sender.unbounded_send(Message::Message(publish_response.request_id, publish_response.response)); + tokio::spawn( + subscription_rx + .take_while(move |_| { + connection_finished_test!( + "receiving_task.take_while", + transport_for_take_while + ) + }) + .for_each(move |subscription_event| { + // Process publish response events + match subscription_event { + SubscriptionEvent::PublishResponses(publish_responses) => { + trace!( + "Got {} PublishResponse messages to send", + publish_responses.len() + ); + for publish_response in publish_responses { + trace!( + "<-- 
Sending a Publish Response{}, {:?}", + publish_response.request_id, + &publish_response.response + ); + // Messages will be sent by the writing task + let _ = sender.unbounded_send(Message::Message( + publish_response.request_id, + publish_response.response, + )); + } } } - } - Ok(()) - }) - .map(move |_| { - info!("Subscription receiver is finished"); - deregister_runtime_component!(id_for_map); - }) - .map_err(move |err| { - info!("Subscription receiver is finished with an error {:?}", err); - deregister_runtime_component!(id_for_map_err); - })); + Ok(()) + }) + .map(move |_| { + info!("Subscription receiver is finished"); + deregister_runtime_component!(id_for_map); + }) + .map_err(move |err| { + info!("Subscription receiver is finished with an error {:?}", err); + deregister_runtime_component!(id_for_map_err); + }), + ); } } @@ -812,12 +902,17 @@ impl TcpTransport { server_state.is_abort() } - fn process_hello(&mut self, hello: HelloMessage, sender: &mut UnboundedSender) -> std::result::Result<(), StatusCode> { + fn process_hello( + &mut self, + hello: HelloMessage, + sender: &mut UnboundedSender, + ) -> std::result::Result<(), StatusCode> { let server_protocol_version = 0; let endpoints = { let server_state = trace_read_lock_unwrap!(self.server_state); server_state.endpoints(&hello.endpoint_url, &None) - }.unwrap(); + } + .unwrap(); trace!("Server received HELLO {:?}", hello); if !hello.is_endpoint_url_valid(&endpoints) { @@ -857,15 +952,26 @@ impl TcpTransport { Ok(()) } - fn turn_received_chunks_into_message(&mut self, chunks: &Vec) -> std::result::Result { + fn turn_received_chunks_into_message( + &mut self, + chunks: &Vec, + ) -> std::result::Result { // Validate that all chunks have incrementing sequence numbers and valid chunk types let secure_channel = trace_read_lock_unwrap!(self.secure_channel); - self.last_received_sequence_number = Chunker::validate_chunks(self.last_received_sequence_number + 1, &secure_channel, chunks)?; + self.last_received_sequence_number = Chunker::validate_chunks( + self.last_received_sequence_number + 1, + &secure_channel, + chunks, + )?; // Now decode Chunker::decode(&chunks, &secure_channel, None) } - fn process_chunk(&mut self, chunk: MessageChunk, sender: &mut UnboundedSender) -> std::result::Result<(), StatusCode> { + fn process_chunk( + &mut self, + chunk: MessageChunk, + sender: &mut UnboundedSender, + ) -> std::result::Result<(), StatusCode> { let decoding_limits = { let secure_channel = trace_read_lock_unwrap!(self.secure_channel); secure_channel.decoding_limits() @@ -899,11 +1005,15 @@ impl TcpTransport { } } - fn process_final_chunk(&mut self, message_header: &MessageChunkHeader, sender: &mut UnboundedSender) -> Result<(), StatusCode> { + fn process_final_chunk( + &mut self, + message_header: &MessageChunkHeader, + sender: &mut UnboundedSender, + ) -> Result<(), StatusCode> { // Drain pending chunks and turn them into a message let chunks: Vec = self.pending_chunks.drain(..).collect(); let chunk_info = { - let secure_channel = trace_read_lock_unwrap!( self.secure_channel); + let secure_channel = trace_read_lock_unwrap!(self.secure_channel); chunks[0].chunk_info(&secure_channel)? 
}; @@ -912,31 +1022,61 @@ impl TcpTransport { let request_id = chunk_info.sequence_header.request_id; let sender = MessageSender { - sender: sender.clone() + sender: sender.clone(), }; match message_header.message_type { - MessageChunkType::OpenSecureChannel => self.process_open_secure_channel(request_id, &request, &chunk_info.security_header, &sender), - MessageChunkType::CloseSecureChannel => self.process_close_secure_channel(request_id, &request, &sender), - MessageChunkType::Message => self.process_message(request_id, &request, &sender) + MessageChunkType::OpenSecureChannel => self.process_open_secure_channel( + request_id, + &request, + &chunk_info.security_header, + &sender, + ), + MessageChunkType::CloseSecureChannel => { + self.process_close_secure_channel(request_id, &request, &sender) + } + MessageChunkType::Message => self.process_message(request_id, &request, &sender), } } - fn process_open_secure_channel(&mut self, request_id: u32, request: &SupportedMessage, security_header: &SecurityHeader, sender: &MessageSender) -> Result<(), StatusCode> { + fn process_open_secure_channel( + &mut self, + request_id: u32, + request: &SupportedMessage, + security_header: &SecurityHeader, + sender: &MessageSender, + ) -> Result<(), StatusCode> { let mut secure_channel = trace_write_lock_unwrap!(self.secure_channel); - let response = self.secure_channel_service.open_secure_channel(&mut secure_channel, security_header, self.client_protocol_version, &request)?; + let response = self.secure_channel_service.open_secure_channel( + &mut secure_channel, + security_header, + self.client_protocol_version, + &request, + )?; let _ = sender.send_message(request_id, response); Ok(()) } - fn process_close_secure_channel(&mut self, request_id: u32, request: &SupportedMessage, sender: &MessageSender) -> Result<(), StatusCode> { + fn process_close_secure_channel( + &mut self, + request_id: u32, + request: &SupportedMessage, + sender: &MessageSender, + ) -> Result<(), StatusCode> { let response = self.secure_channel_service.close_secure_channel(request)?; let _ = sender.send_message(request_id, response); Ok(()) } - fn process_message(&mut self, request_id: u32, request: &SupportedMessage, sender: &MessageSender) -> Result<(), StatusCode> { - let _ = self.message_handler.handle_message(request_id, request, sender)?; + fn process_message( + &mut self, + request_id: u32, + request: &SupportedMessage, + sender: &MessageSender, + ) -> Result<(), StatusCode> { + let _ = self + .message_handler + .handle_message(request_id, request, sender)?; Ok(()) } } diff --git a/server/src/comms/transport.rs b/server/src/comms/transport.rs index eb61a84c1..50e1e90f4 100644 --- a/server/src/comms/transport.rs +++ b/server/src/comms/transport.rs @@ -30,7 +30,7 @@ pub trait Transport { fn has_received_hello(&self) -> bool { match self.state() { TransportState::New | TransportState::WaitingHello => false, - _ => true + _ => true, } } /// Terminate the session and put the connection in a finished state diff --git a/server/src/config.rs b/server/src/config.rs index 8d82f22f9..7e95589be 100644 --- a/server/src/config.rs +++ b/server/src/config.rs @@ -7,15 +7,11 @@ use std::collections::{BTreeMap, BTreeSet}; use std::path::PathBuf; use std::str::FromStr; -use opcua_core::{ - comms::url::url_matches_except_host, - config::Config, -}; +use opcua_core::{comms::url::url_matches_except_host, config::Config}; use opcua_crypto::{CertificateStore, SecurityPolicy, Thumbprint}; use opcua_types::{ - constants as opcua_types_constants, 
DecodingLimits, MessageSecurityMode, - service_types::ApplicationType, - UAString, + constants as opcua_types_constants, service_types::ApplicationType, DecodingLimits, + MessageSecurityMode, UAString, }; use crate::constants; @@ -48,7 +44,10 @@ pub struct ServerUserToken { impl ServerUserToken { /// Create a user pass token - pub fn user_pass(user: T, pass: T) -> Self where T: Into { + pub fn user_pass(user: T, pass: T) -> Self + where + T: Into, + { ServerUserToken { user: user.into(), pass: Some(pass.into()), @@ -58,7 +57,10 @@ impl ServerUserToken { } /// Create an X509 token. - pub fn x509(user: T, cert_path: &PathBuf) -> Self where T: Into { + pub fn x509(user: T, cert_path: &PathBuf) -> Self + where + T: Into, + { ServerUserToken { user: user.into(), pass: None, @@ -86,7 +88,10 @@ impl ServerUserToken { pub fn is_valid(&self, id: &str) -> bool { let mut valid = true; if id == ANONYMOUS_USER_TOKEN_ID { - error!("User token {} is invalid because id is a reserved value, use another value.", id); + error!( + "User token {} is invalid because id is a reserved value, use another value.", + id + ); valid = false; } if self.user.is_empty() { @@ -94,10 +99,16 @@ impl ServerUserToken { valid = false; } if self.pass.is_some() && self.x509.is_some() { - error!("User token {} holds a password and certificate info - it cannot be both.", id); + error!( + "User token {} holds a password and certificate info - it cannot be both.", + id + ); valid = false; } else if self.pass.is_none() && self.x509.is_none() { - error!("User token {} fails to provide a password or certificate info.", id); + error!( + "User token {} fails to provide a password or certificate info.", + id + ); valid = false; } valid @@ -180,7 +191,15 @@ impl<'a> From<(&'a str, SecurityPolicy, MessageSecurityMode, &'a [&'a str])> for } impl ServerEndpoint { - pub fn new(path: T, security_policy: SecurityPolicy, security_mode: MessageSecurityMode, user_token_ids: &[String]) -> Self where T: Into { + pub fn new( + path: T, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + user_token_ids: &[String], + ) -> Self + where + T: Into, + { ServerEndpoint { path: path.into(), security_policy: security_policy.to_string(), @@ -199,7 +218,7 @@ impl ServerEndpoint { SecurityPolicy::Basic256 => 3, SecurityPolicy::Basic256Sha256 => 4, SecurityPolicy::Aes256Sha256RsaPss => 5, - _ => 0 + _ => 0, }; if security_mode == MessageSecurityMode::SignAndEncrypt { security_level + 10 @@ -208,48 +227,136 @@ impl ServerEndpoint { } } - pub fn new_none(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::None, MessageSecurityMode::None, user_token_ids) - } - - pub fn new_basic128rsa15_sign(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::Sign, user_token_ids) - } - - pub fn new_basic128rsa15_sign_encrypt(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::SignAndEncrypt, user_token_ids) - } - - pub fn new_basic256_sign(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Basic256, MessageSecurityMode::Sign, user_token_ids) - } - - pub fn new_basic256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Basic256, MessageSecurityMode::SignAndEncrypt, user_token_ids) - } - - pub fn new_basic256sha256_sign(path: T, user_token_ids: &[String]) -> Self where 
T: Into { - Self::new(path, SecurityPolicy::Basic256Sha256, MessageSecurityMode::Sign, user_token_ids) - } - - pub fn new_basic256sha256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, user_token_ids) - } - - pub fn new_aes128_sha256_rsaoaep_sign(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Aes128Sha256RsaOaep, MessageSecurityMode::Sign, user_token_ids) - } - - pub fn new_aes128_sha256_rsaoaep_sign_encrypt(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Aes128Sha256RsaOaep, MessageSecurityMode::SignAndEncrypt, user_token_ids) - } - - pub fn new_aes256_sha256_rsapss_sign(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Aes256Sha256RsaPss, MessageSecurityMode::Sign, user_token_ids) - } - - pub fn new_aes256_sha256_rsapss_sign_encrypt(path: T, user_token_ids: &[String]) -> Self where T: Into { - Self::new(path, SecurityPolicy::Aes256Sha256RsaPss, MessageSecurityMode::SignAndEncrypt, user_token_ids) + pub fn new_none(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::None, + MessageSecurityMode::None, + user_token_ids, + ) + } + + pub fn new_basic128rsa15_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_basic128rsa15_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_basic256_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_basic256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_basic256sha256_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_basic256sha256_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_aes128_sha256_rsaoaep_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_aes128_sha256_rsaoaep_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes128Sha256RsaOaep, + MessageSecurityMode::SignAndEncrypt, + user_token_ids, + ) + } + + pub fn new_aes256_sha256_rsapss_sign(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::Sign, + user_token_ids, + ) + } + + pub fn new_aes256_sha256_rsapss_sign_encrypt(path: T, user_token_ids: &[String]) -> Self + where + T: Into, + { + Self::new( + path, + SecurityPolicy::Aes256Sha256RsaPss, + MessageSecurityMode::SignAndEncrypt, + 
user_token_ids, + ) } pub fn is_valid(&self, id: &str, user_tokens: &BTreeMap) -> bool { @@ -268,7 +375,8 @@ impl ServerEndpoint { } if let Some(ref password_security_policy) = self.password_security_policy { - let password_security_policy = SecurityPolicy::from_str(password_security_policy).unwrap(); + let password_security_policy = + SecurityPolicy::from_str(password_security_policy).unwrap(); if password_security_policy == SecurityPolicy::Unknown { error!("Endpoint {} is invalid. Password security policy \"{}\" is invalid. Valid values are None, Basic128Rsa15, Basic256, Basic256Sha256", id, password_security_policy); valid = false; @@ -284,11 +392,16 @@ impl ServerEndpoint { } else if security_mode == MessageSecurityMode::Invalid { error!("Endpoint {} is invalid. Security mode \"{}\" is invalid. Valid values are None, Sign, SignAndEncrypt", id, self.security_mode); valid = false; - } else if (security_policy == SecurityPolicy::None && security_mode != MessageSecurityMode::None) || - (security_policy != SecurityPolicy::None && security_mode == MessageSecurityMode::None) { + } else if (security_policy == SecurityPolicy::None + && security_mode != MessageSecurityMode::None) + || (security_policy != SecurityPolicy::None + && security_mode == MessageSecurityMode::None) + { error!("Endpoint {} is invalid. Security policy and security mode must both contain None or neither of them should (1).", id); valid = false; - } else if security_policy != SecurityPolicy::None && security_mode == MessageSecurityMode::None { + } else if security_policy != SecurityPolicy::None + && security_mode == MessageSecurityMode::None + { error!("Endpoint {} is invalid. Security policy and security mode must both contain None or neither of them should (2).", id); valid = false; } @@ -314,7 +427,10 @@ impl ServerEndpoint { if let Some(ref security_policy) = self.password_security_policy { match SecurityPolicy::from_str(security_policy).unwrap() { SecurityPolicy::Unknown => { - panic!("Password security policy {} is unrecognized", security_policy); + panic!( + "Password security policy {} is unrecognized", + security_policy + ); } security_policy => { password_security_policy = security_policy; @@ -453,16 +569,28 @@ impl Config for ServerConfig { valid } - fn application_name(&self) -> UAString { UAString::from(&self.application_name) } + fn application_name(&self) -> UAString { + UAString::from(&self.application_name) + } - fn application_uri(&self) -> UAString { UAString::from(&self.application_uri) } + fn application_uri(&self) -> UAString { + UAString::from(&self.application_uri) + } - fn product_uri(&self) -> UAString { UAString::from(&self.product_uri) } + fn product_uri(&self) -> UAString { + UAString::from(&self.product_uri) + } - fn application_type(&self) -> ApplicationType { ApplicationType::Server } + fn application_type(&self) -> ApplicationType { + ApplicationType::Server + } fn discovery_urls(&self) -> Option> { - let discovery_urls: Vec = self.discovery_urls.iter().map(|v| UAString::from(v)).collect(); + let discovery_urls: Vec = self + .discovery_urls + .iter() + .map(|v| UAString::from(v)) + .collect(); Some(discovery_urls) } } @@ -501,7 +629,14 @@ impl ServerConfig { /// The default PKI directory pub const PKI_DIR: &'static str = "pki"; - pub fn new(application_name: T, user_tokens: BTreeMap, endpoints: BTreeMap) -> Self where T: Into { + pub fn new( + application_name: T, + user_tokens: BTreeMap, + endpoints: BTreeMap, + ) -> Self + where + T: Into, + { let host = "127.0.0.1".to_string(); let port = 
constants::DEFAULT_RUST_OPC_UA_SERVER_PORT; @@ -553,12 +688,17 @@ impl ServerConfig { } pub fn read_x509_thumbprints(&mut self) { - self.user_tokens.iter_mut().for_each(|(_, token)| token.read_thumbprint()); + self.user_tokens + .iter_mut() + .for_each(|(_, token)| token.read_thumbprint()); } /// Returns a opc.tcp://server:port url that paths can be appended onto pub fn base_endpoint_url(&self) -> String { - format!("opc.tcp://{}:{}", self.tcp_config.host, self.tcp_config.port) + format!( + "opc.tcp://{}:{}", + self.tcp_config.host, self.tcp_config.port + ) } /// Find the default endpoint @@ -572,12 +712,19 @@ impl ServerConfig { /// Find the first endpoint that matches the specified url, security policy and message /// security mode. - pub fn find_endpoint(&self, endpoint_url: &str, security_policy: SecurityPolicy, security_mode: MessageSecurityMode) -> Option<&ServerEndpoint> { + pub fn find_endpoint( + &self, + endpoint_url: &str, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + ) -> Option<&ServerEndpoint> { let base_endpoint_url = self.base_endpoint_url(); let endpoint = self.endpoints.iter().find(|&(_, e)| { // Test end point's security_policy_uri and matching url if url_matches_except_host(&e.endpoint_url(&base_endpoint_url), endpoint_url) { - if e.security_policy() == security_policy && e.message_security_mode() == security_mode { + if e.security_policy() == security_policy + && e.message_security_mode() == security_mode + { trace!("Found matching endpoint for url {} - {:?}", endpoint_url, e); true } else { diff --git a/server/src/continuation_point.rs b/server/src/continuation_point.rs index 11327e70e..6732b453c 100644 --- a/server/src/continuation_point.rs +++ b/server/src/continuation_point.rs @@ -6,10 +6,7 @@ use std::sync::{Arc, Mutex}; -use opcua_types::{ - ByteString, DateTimeUtc, - service_types::ReferenceDescription, -}; +use opcua_types::{service_types::ReferenceDescription, ByteString, DateTimeUtc}; use crate::prelude::AddressSpace; @@ -28,4 +25,4 @@ impl BrowseContinuationPoint { pub fn is_valid_browse_continuation_point(&self, address_space: &AddressSpace) -> bool { self.address_space_last_modified >= address_space.last_modified() } -} \ No newline at end of file +} diff --git a/server/src/diagnostics.rs b/server/src/diagnostics.rs index 730dc1abd..1f967f2da 100644 --- a/server/src/diagnostics.rs +++ b/server/src/diagnostics.rs @@ -8,10 +8,7 @@ use opcua_types::service_types::ServerDiagnosticsSummaryDataType; use opcua_core::RUNTIME; -use crate::{ - subscriptions::subscription::Subscription, - session::Session, -}; +use crate::{session::Session, subscriptions::subscription::Subscription}; /// Structure that captures diagnostics information for the server #[derive(Clone, Serialize, Debug)] @@ -48,7 +45,8 @@ impl ServerDiagnostics { /// started (or restarted). The requests include all Services defined in Part 4, also requests /// to create sessions. pub(crate) fn on_rejected_security_session(&mut self) { - self.server_diagnostics_summary.security_rejected_session_count += 1; + self.server_diagnostics_summary + .security_rejected_session_count += 1; } /// Increment the number of requests that were rejected since the server was started (or restarted).
The @@ -62,13 +60,19 @@ impl ServerDiagnostics { pub(crate) fn on_create_session(&mut self, _session: &Session) { self.server_diagnostics_summary.current_session_count += 1; self.server_diagnostics_summary.cumulated_session_count += 1; - debug!("Incrementing current session count to {}", self.server_diagnostics_summary.current_session_count); + debug!( + "Incrementing current session count to {}", + self.server_diagnostics_summary.current_session_count + ); } /// Decrement the number of client sessions currently established in the server. pub(crate) fn on_destroy_session(&mut self, _session: &Session) { self.server_diagnostics_summary.current_session_count -= 1; - debug!("Decrementing current session count to {}", self.server_diagnostics_summary.current_session_count); + debug!( + "Decrementing current session count to {}", + self.server_diagnostics_summary.current_session_count + ); } /// Increment the number of subscriptions currently established in the server. diff --git a/server/src/discovery/mod.rs b/server/src/discovery/mod.rs index 8eada438a..1ebba5de0 100644 --- a/server/src/discovery/mod.rs +++ b/server/src/discovery/mod.rs @@ -37,7 +37,10 @@ fn linux_lds_pki_dir() -> String { /// Registers the specified endpoints with the specified discovery server pub fn register_with_discovery_server(discovery_server_url: &str, server_state: &ServerState) { - debug!("register_with_discovery_server, for {}", discovery_server_url); + debug!( + "register_with_discovery_server, for {}", + discovery_server_url + ); let server_config = trace_read_lock_unwrap!(server_state.config); // Create a client, ensuring to retry only once @@ -61,18 +64,27 @@ pub fn register_with_discovery_server(discovery_server_url: &str, server_state: match client.register_server(discovery_server_url, registered_server) { Ok(_) => {} Err(err) => { - error!(r#"Cannot register server with discovery server \"{}\". + error!( + r#"Cannot register server with discovery server \"{}\". The errors immediately preceding this message may be caused by this issue. Check if the error "{}" indicates the reason why that the registration could not happen. Check that your server can connect to the discovery server and that your server's cert is trusted by the discovery server and vice versa. 
The discovery server's PKI directory is (Windows) -{} or (Linux) {}."#, discovery_server_url, err, windows_lds_pki_dir(), linux_lds_pki_dir()); +{} or (Linux) {}."#, + discovery_server_url, + err, + windows_lds_pki_dir(), + linux_lds_pki_dir() + ); } } } Err(err) => { - error!("Cannot find servers on discovery url {}, error = {:?}", discovery_server_url, err); + error!( + "Cannot find servers on discovery url {}, error = {:?}", + discovery_server_url, err + ); } } } else { diff --git a/server/src/events/audit/cancel_event.rs b/server/src/events/audit/cancel_event.rs index 76069b2d1..57dad3113 100644 --- a/server/src/events/audit/cancel_event.rs +++ b/server/src/events/audit/cancel_event.rs @@ -4,15 +4,9 @@ use opcua_types::*; -use crate::{ - address_space::address_space::AddressSpace, - events::event::Event, -}; +use crate::{address_space::address_space::AddressSpace, events::event::Event}; -use super::{ - session_events::AuditSessionEventType, - AuditEvent, -}; +use super::{session_events::AuditSessionEventType, AuditEvent}; pub struct AuditCancelEventType { base: AuditSessionEventType, @@ -39,7 +33,15 @@ impl Event for AuditCancelEventType { fn raise(&mut self, address_space: &mut AddressSpace) -> Result { let node_id = self.base.raise(address_space)?; let ns = node_id.namespace; - self.add_property(&node_id, NodeId::next_numeric(ns), "RequestHandle", "RequestHandle", DataTypeId::UInt32, self.request_handle, address_space); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "RequestHandle", + "RequestHandle", + DataTypeId::UInt32, + self.request_handle, + address_space, + ); Ok(node_id) } } diff --git a/server/src/events/audit/certificate_events.rs b/server/src/events/audit/certificate_events.rs index 3d69b8361..2f91ef4c1 100644 --- a/server/src/events/audit/certificate_events.rs +++ b/server/src/events/audit/certificate_events.rs @@ -4,15 +4,9 @@ use opcua_types::*; -use crate::{ - address_space::address_space::AddressSpace, - events::event::Event, -}; +use crate::{address_space::address_space::AddressSpace, events::event::Event}; -use super::{ - AuditEvent, - security_event::AuditSecurityEventType, -}; +use super::{security_event::AuditSecurityEventType, AuditEvent}; pub struct AuditCertificateEventType { base: AuditSecurityEventType, @@ -29,7 +23,15 @@ impl Event for AuditCertificateEventType { fn raise(&mut self, address_space: &mut AddressSpace) -> Result { let node_id = self.base.raise(address_space)?; let ns = node_id.namespace; - self.add_property(&node_id, NodeId::next_numeric(ns), "Certificate", "Certificate", DataTypeId::ByteString, self.certificate.clone(), address_space); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "Certificate", + "Certificate", + DataTypeId::ByteString, + self.certificate.clone(), + address_space, + ); Ok(node_id) } } @@ -47,14 +49,27 @@ impl AuditEvent for AuditCertificateEventType { audit_security_event_impl!(AuditCertificateEventType, base); impl AuditCertificateEventType { - pub fn new(node_id: R, event_type_id: E, browse_name: S, display_name: T, time: DateTime) -> Self - where R: Into, - E: Into, - S: Into, - T: Into, + pub fn new( + node_id: R, + event_type_id: E, + browse_name: S, + display_name: T, + time: DateTime, + ) -> Self + where + R: Into, + E: Into, + S: Into, + T: Into, { Self { - base: AuditSecurityEventType::new(node_id, event_type_id, browse_name, display_name, time), + base: AuditSecurityEventType::new( + node_id, + event_type_id, + browse_name, + display_name, + time, + ), certificate: 
ByteString::null(), } } @@ -71,7 +86,7 @@ macro_rules! audit_certificate_event_impl { audit_security_event_impl!($event, base); pub struct $event { - base: AuditCertificateEventType + base: AuditCertificateEventType, } impl Event for $event { @@ -98,12 +113,19 @@ macro_rules! audit_certificate_event_impl { impl $event { pub fn new(node_id: R, time: DateTime) -> Self - where R: Into, + where + R: Into, { let browse_name = stringify!($event); let display_name = stringify!($event); Self { - base: AuditCertificateEventType::new(node_id, Self::event_type_id(), browse_name, display_name, time), + base: AuditCertificateEventType::new( + node_id, + Self::event_type_id(), + browse_name, + display_name, + time, + ), } } @@ -112,7 +134,7 @@ macro_rules! audit_certificate_event_impl { self } } - } + }; } audit_certificate_event_impl!(AuditCertificateDataMismatchEventType); diff --git a/server/src/events/audit/event.rs b/server/src/events/audit/event.rs index fc9105cdd..d5c9471bc 100644 --- a/server/src/events/audit/event.rs +++ b/server/src/events/audit/event.rs @@ -28,9 +28,12 @@ impl AuditEvent for AuditEventType { fn log_message(&self) -> String { // Dump out comma-separated key=value pairs in the order they were populated - self.base.properties().iter() + self.base + .properties() + .iter() .map(|(k, v)| format!("{}={}", k, v)) - .collect::>().join(",") + .collect::>() + .join(",") } } @@ -41,16 +44,55 @@ impl Event for AuditEventType { self.base.is_valid() } - fn raise(&mut self, address_space: &mut AddressSpace) -> Result - { + fn raise(&mut self, address_space: &mut AddressSpace) -> Result { if self.is_valid() { let node_id = self.base.raise(address_space)?; let ns = node_id.namespace; - self.add_property(&node_id, NodeId::next_numeric(ns), "ActionTimeStamp", "ActionTimeStamp", DataTypeId::UtcTime, self.action_time_stamp.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "Status", "Status", DataTypeId::Boolean, self.status, address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "ServerId", "ServerId", DataTypeId::String, self.server_id.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "ClientAuditEntryId", "ClientAuditEntryId", DataTypeId::String, self.client_audit_entry_id.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "ClientUserId", "ClientUserId", DataTypeId::String, self.client_user_id.clone(), address_space); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ActionTimeStamp", + "ActionTimeStamp", + DataTypeId::UtcTime, + self.action_time_stamp.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "Status", + "Status", + DataTypeId::Boolean, + self.status, + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ServerId", + "ServerId", + DataTypeId::String, + self.server_id.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ClientAuditEntryId", + "ClientAuditEntryId", + DataTypeId::String, + self.client_audit_entry_id.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ClientUserId", + "ClientUserId", + DataTypeId::String, + self.client_user_id.clone(), + address_space, + ); Ok(node_id) } else { error!("AuditEventType is invalid and will not be inserted"); @@ -62,17 +104,31 @@ impl Event for AuditEventType { base_event_impl!(AuditEventType, base); impl AuditEventType { - pub fn new(node_id: R, event_type_id: E, 
browse_name: S, display_name: T, time: DateTime) -> Self - where R: Into, - E: Into, - S: Into, - T: Into, + pub fn new( + node_id: R, + event_type_id: E, + browse_name: S, + display_name: T, + time: DateTime, + ) -> Self + where + R: Into, + E: Into, + S: Into, + T: Into, { let action_time_stamp = DateTime::now(); let server_id = UAString::null(); let parent_node = Self::parent_node(); Self { - base: BaseEventType::new(node_id, event_type_id, browse_name, display_name, parent_node, time), + base: BaseEventType::new( + node_id, + event_type_id, + browse_name, + display_name, + parent_node, + time, + ), status: false, action_time_stamp, server_id, @@ -81,12 +137,18 @@ impl AuditEventType { } } - pub fn client_audit_entry_id(mut self, client_audit_entry_id: T) -> Self where T: Into { + pub fn client_audit_entry_id(mut self, client_audit_entry_id: T) -> Self + where + T: Into, + { self.client_audit_entry_id = client_audit_entry_id.into(); self } - pub fn client_user_id(mut self, client_user_id: T) -> Self where T: Into { + pub fn client_user_id(mut self, client_user_id: T) -> Self + where + T: Into, + { self.client_user_id = client_user_id.into(); self } @@ -96,7 +158,10 @@ impl AuditEventType { self } - pub fn server_id(mut self, server_id: T) -> Self where T: Into { + pub fn server_id(mut self, server_id: T) -> Self + where + T: Into, + { self.server_id = server_id.into(); self } @@ -112,12 +177,18 @@ macro_rules! audit_event_impl { base_event_impl!($event, $base); impl $event { - pub fn client_audit_entry_id(mut self, client_audit_entry_id: T) -> Self where T: Into { + pub fn client_audit_entry_id(mut self, client_audit_entry_id: T) -> Self + where + T: Into, + { self.$base = self.$base.client_audit_entry_id(client_audit_entry_id); self } - pub fn client_user_id(mut self, client_user_id: T) -> Self where T: Into { + pub fn client_user_id(mut self, client_user_id: T) -> Self + where + T: Into, + { self.$base = self.$base.client_user_id(client_user_id); self } @@ -127,7 +198,10 @@ macro_rules! audit_event_impl { self } - pub fn server_id(mut self, server_id: T) -> Self where T: Into { + pub fn server_id(mut self, server_id: T) -> Self + where + T: Into, + { self.$base = self.$base.server_id(server_id); self } @@ -137,5 +211,5 @@ macro_rules! 
audit_event_impl { self } } - } + }; } diff --git a/server/src/events/audit/mod.rs b/server/src/events/audit/mod.rs index c1b0538cc..05a376f7d 100644 --- a/server/src/events/audit/mod.rs +++ b/server/src/events/audit/mod.rs @@ -12,10 +12,7 @@ use std::sync::{Arc, RwLock}; use opcua_types::*; -use crate::{ - address_space::address_space::AddressSpace, - events::event::Event, -}; +use crate::{address_space::address_space::AddressSpace, events::event::Event}; pub trait AuditEvent: Event { fn parent_node() -> NodeId { @@ -49,12 +46,13 @@ pub(crate) struct AuditLog { impl AuditLog { pub fn new(address_space: Arc>) -> AuditLog { - AuditLog { - address_space - } + AuditLog { address_space } } - pub fn raise_and_log(&self, mut event: T) -> Result where T: AuditEvent + Event { + pub fn raise_and_log(&self, mut event: T) -> Result + where + T: AuditEvent + Event, + { let mut address_space = trace_write_lock_unwrap!(self.address_space); let result = event.raise(&mut address_space).map_err(|_| ()); if result.is_err() { @@ -65,4 +63,4 @@ impl AuditLog { info!("Audit Event: {}", event.log_message()); result } -} \ No newline at end of file +} diff --git a/server/src/events/audit/node_management_event.rs b/server/src/events/audit/node_management_event.rs index 7610c87c0..23c46cd7d 100644 --- a/server/src/events/audit/node_management_event.rs +++ b/server/src/events/audit/node_management_event.rs @@ -4,18 +4,12 @@ use opcua_types::*; -use crate::{ - address_space::address_space::AddressSpace, - events::event::Event, -}; +use crate::{address_space::address_space::AddressSpace, events::event::Event}; -use super::{ - AuditEvent, - event::AuditEventType, -}; +use super::{event::AuditEventType, AuditEvent}; pub struct AuditNodeManagementEventType { - base: AuditEventType + base: AuditEventType, } impl Event for AuditNodeManagementEventType { diff --git a/server/src/events/audit/security_event.rs b/server/src/events/audit/security_event.rs index 4cd06db62..4e6e20c06 100644 --- a/server/src/events/audit/security_event.rs +++ b/server/src/events/audit/security_event.rs @@ -4,19 +4,13 @@ use opcua_types::*; -use crate::{ - address_space::address_space::AddressSpace, - events::event::Event, -}; +use crate::{address_space::address_space::AddressSpace, events::event::Event}; -use super::{ - AuditEvent, - event::AuditEventType, -}; +use super::{event::AuditEventType, AuditEvent}; /// Base type for audit security events. Do not raise events of this type pub(super) struct AuditSecurityEventType { - base: AuditEventType + base: AuditEventType, } impl AuditEvent for AuditSecurityEventType { @@ -44,11 +38,18 @@ impl Event for AuditSecurityEventType { audit_event_impl!(AuditSecurityEventType, base); impl AuditSecurityEventType { - pub fn new(node_id: R, event_type_id: E, browse_name: S, display_name: T, time: DateTime) -> Self - where R: Into, - E: Into, - S: Into, - T: Into, + pub fn new( + node_id: R, + event_type_id: E, + browse_name: S, + display_name: T, + time: DateTime, + ) -> Self + where + R: Into, + E: Into, + S: Into, + T: Into, { Self { base: AuditEventType::new(node_id, event_type_id, browse_name, display_name, time), @@ -59,5 +60,5 @@ impl AuditSecurityEventType { macro_rules! 
audit_security_event_impl { ( $event:ident, $base:ident ) => { audit_event_impl!($event, $base); - } + }; } diff --git a/server/src/events/audit/session_events.rs b/server/src/events/audit/session_events.rs index c6b7eac38..189076694 100644 --- a/server/src/events/audit/session_events.rs +++ b/server/src/events/audit/session_events.rs @@ -5,15 +5,9 @@ use opcua_crypto::X509; use opcua_types::*; -use crate::{ - address_space::address_space::AddressSpace, - events::event::Event, -}; +use crate::{address_space::address_space::AddressSpace, events::event::Event}; -use super::{ - AuditEvent, - security_event::AuditSecurityEventType, -}; +use super::{security_event::AuditSecurityEventType, AuditEvent}; /// Base type for audit session events. Do not raise events of this type pub struct AuditSessionEventType { @@ -32,8 +26,9 @@ impl AuditCloseSessionReason { match self { AuditCloseSessionReason::CloseSession => "Session/CloseSession", AuditCloseSessionReason::Timeout => "Session/Timeout", - AuditCloseSessionReason::Terminated => "Session/Terminated" - }.into() + AuditCloseSessionReason::Terminated => "Session/Terminated", + } + .into() } } @@ -51,14 +46,21 @@ impl Event for AuditSessionEventType { type Err = (); fn is_valid(&self) -> bool { - !self.session_id.is_null() && - self.base.is_valid() + !self.session_id.is_null() && self.base.is_valid() } fn raise(&mut self, address_space: &mut AddressSpace) -> Result { let node_id = self.base.raise(address_space)?; let ns = node_id.namespace; - self.add_property(&node_id, NodeId::next_numeric(ns), "SessionId", "SessionId", DataTypeId::NodeId, self.session_id.clone(), address_space); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "SessionId", + "SessionId", + DataTypeId::NodeId, + self.session_id.clone(), + address_space, + ); Ok(node_id) } } @@ -66,26 +68,49 @@ impl Event for AuditSessionEventType { audit_security_event_impl!(AuditSessionEventType, base); impl AuditSessionEventType { - pub fn new(node_id: R, event_type_id: E, browse_name: S, display_name: T, time: DateTime) -> Self - where R: Into, - E: Into, - S: Into, - T: Into, + pub fn new( + node_id: R, + event_type_id: E, + browse_name: S, + display_name: T, + time: DateTime, + ) -> Self + where + R: Into, + E: Into, + S: Into, + T: Into, { Self { - base: AuditSecurityEventType::new(node_id, event_type_id, browse_name, display_name, time), + base: AuditSecurityEventType::new( + node_id, + event_type_id, + browse_name, + display_name, + time, + ), session_id: NodeId::null(), } } pub fn new_close_session(node_id: R, time: DateTime, reason: AuditCloseSessionReason) -> Self - where R: Into + where + R: Into, { - Self::new(node_id, Self::event_type_id(), "AuditSessionEventType", "AuditSessionEventType", time) - .source_name(reason.source_name()) + Self::new( + node_id, + Self::event_type_id(), + "AuditSessionEventType", + "AuditSessionEventType", + time, + ) + .source_name(reason.source_name()) } - pub fn session_id(mut self, session_id: T) -> Self where T: Into { + pub fn session_id(mut self, session_id: T) -> Self + where + T: Into, + { self.session_id = session_id.into(); self } @@ -96,12 +121,15 @@ macro_rules! 
audit_session_event_impl { audit_security_event_impl!($event, $base); impl $event { - pub fn session_id(mut self, session_id: T) -> $event where T: Into { + pub fn session_id(mut self, session_id: T) -> $event + where + T: Into, + { self.$base = self.$base.session_id(session_id); self } } - } + }; } //////////////////////////////////////////////////////////////////////////////////////////////////// @@ -128,19 +156,51 @@ impl Event for AuditCreateSessionEventType { type Err = (); fn is_valid(&self) -> bool { - !self.secure_channel_id.is_null() && - !self.client_certificate.is_null() && - !self.client_certificate_thumbprint.is_null() && - self.base.is_valid() + !self.secure_channel_id.is_null() + && !self.client_certificate.is_null() + && !self.client_certificate_thumbprint.is_null() + && self.base.is_valid() } fn raise(&mut self, address_space: &mut AddressSpace) -> Result { let node_id = self.base.raise(address_space)?; let ns = node_id.namespace; - self.add_property(&node_id, NodeId::next_numeric(ns), "SecureChannelId", "SecureChannelId", DataTypeId::String, self.secure_channel_id.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "ClientCertificate", "ClientCertificate", DataTypeId::ByteString, self.client_certificate.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "ClientCertificateThumbprint", "ClientCertificateThumbprint", DataTypeId::String, self.client_certificate_thumbprint.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "RevisedSessionTimeout", "RevisedSessionTimeout", DataTypeId::Duration, self.revised_session_timeout, address_space); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "SecureChannelId", + "SecureChannelId", + DataTypeId::String, + self.secure_channel_id.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ClientCertificate", + "ClientCertificate", + DataTypeId::ByteString, + self.client_certificate.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ClientCertificateThumbprint", + "ClientCertificateThumbprint", + DataTypeId::String, + self.client_certificate_thumbprint.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "RevisedSessionTimeout", + "RevisedSessionTimeout", + DataTypeId::Duration, + self.revised_session_timeout, + address_space, + ); Ok(node_id) } } @@ -149,11 +209,18 @@ audit_session_event_impl!(AuditCreateSessionEventType, base); impl AuditCreateSessionEventType { pub fn new(node_id: R, time: DateTime) -> Self - where R: Into, + where + R: Into, { let event_type_id = ObjectTypeId::AuditCreateSessionEventType; Self { - base: AuditSessionEventType::new(node_id, event_type_id, "AuditCreateSessionEventType", "AuditCreateSessionEventType", time), + base: AuditSessionEventType::new( + node_id, + event_type_id, + "AuditCreateSessionEventType", + "AuditCreateSessionEventType", + time, + ), secure_channel_id: UAString::null(), client_certificate: ByteString::null(), client_certificate_thumbprint: UAString::null(), @@ -161,7 +228,10 @@ impl AuditCreateSessionEventType { } } - pub fn secure_channel_id(mut self, secure_channel_id: T) -> Self where T: Into { + pub fn secure_channel_id(mut self, secure_channel_id: T) -> Self + where + T: Into, + { self.secure_channel_id = secure_channel_id.into(); self } @@ -208,17 +278,50 @@ impl Event for AuditActivateSessionEventType { let node_id = self.base.raise(address_space)?; let ns = 
node_id.namespace; // Client software certificates is an array of extension objects (extension object i=344) - let client_software_certificates = - self.client_software_certificates.iter().map(|c| { - Variant::from(ExtensionObject::from_encodable(ObjectId::SignedSoftwareCertificate_Encoding_DefaultBinary, c)) - }).collect::>(); - self.add_property(&node_id, NodeId::next_numeric(ns), "ClientSoftwareCertificates", "ClientSoftwareCertificates", DataTypeId::SignedSoftwareCertificate, client_software_certificates, address_space); + let client_software_certificates = self + .client_software_certificates + .iter() + .map(|c| { + Variant::from(ExtensionObject::from_encodable( + ObjectId::SignedSoftwareCertificate_Encoding_DefaultBinary, + c, + )) + }) + .collect::>(); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ClientSoftwareCertificates", + "ClientSoftwareCertificates", + DataTypeId::SignedSoftwareCertificate, + client_software_certificates, + address_space, + ); // User identity token (extension object i=316) - let user_identity_token = ExtensionObject::from_encodable(ObjectId::UserIdentityToken_Encoding_DefaultBinary, &self.user_identity_token); - self.add_property(&node_id, NodeId::next_numeric(ns), "UserIdentityToken", "UserIdentityToken", DataTypeId::UserIdentityToken, user_identity_token, address_space); - - self.add_property(&node_id, NodeId::next_numeric(ns), "SecureChannelId", "SecureChannelId", DataTypeId::String, self.secure_channel_id.clone(), address_space); + let user_identity_token = ExtensionObject::from_encodable( + ObjectId::UserIdentityToken_Encoding_DefaultBinary, + &self.user_identity_token, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "UserIdentityToken", + "UserIdentityToken", + DataTypeId::UserIdentityToken, + user_identity_token, + address_space, + ); + + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "SecureChannelId", + "SecureChannelId", + DataTypeId::String, + self.secure_channel_id.clone(), + address_space, + ); Ok(node_id) } } @@ -227,18 +330,30 @@ audit_session_event_impl!(AuditActivateSessionEventType, base); impl AuditActivateSessionEventType { pub fn new(node_id: R, time: DateTime) -> Self - where R: Into, + where + R: Into, { let event_type_id = ObjectTypeId::AuditCreateSessionEventType; Self { - base: AuditSessionEventType::new(node_id, event_type_id, "AuditCreateSessionEventType", "AuditCreateSessionEventType", time), + base: AuditSessionEventType::new( + node_id, + event_type_id, + "AuditCreateSessionEventType", + "AuditCreateSessionEventType", + time, + ), client_software_certificates: Vec::new(), - user_identity_token: UserIdentityToken { policy_id: UAString::null() }, + user_identity_token: UserIdentityToken { + policy_id: UAString::null(), + }, secure_channel_id: UAString::null(), } } - pub fn client_software_certificates(mut self, client_software_certificates: Vec) -> Self { + pub fn client_software_certificates( + mut self, + client_software_certificates: Vec, + ) -> Self { self.client_software_certificates = client_software_certificates; self } @@ -249,7 +364,9 @@ impl AuditActivateSessionEventType { } pub fn secure_channel_id(mut self, secure_channel_id: T) -> Self - where T: Into { + where + T: Into, + { self.secure_channel_id = secure_channel_id.into(); self } diff --git a/server/src/events/event.rs b/server/src/events/event.rs index 98382523c..3986a61f0 100644 --- a/server/src/events/event.rs +++ b/server/src/events/event.rs @@ -4,16 +4,13 @@ //! 
Contains functions for generating events and adding them to the address space of the server. use opcua_types::{ - AttributeId, ByteString, DataTypeId, DateTime, DateTimeUtc, ExtensionObject, Guid, LocalizedText, NodeId, - NumericRange, ObjectId, ObjectTypeId, QualifiedName, service_types::TimeZoneDataType, TimestampsToReturn, - UAString, VariableTypeId, Variant, + service_types::TimeZoneDataType, AttributeId, ByteString, DataTypeId, DateTime, DateTimeUtc, + ExtensionObject, Guid, LocalizedText, NodeId, NumericRange, ObjectId, ObjectTypeId, + QualifiedName, TimestampsToReturn, UAString, VariableTypeId, Variant, }; use crate::address_space::{ - AddressSpace, - object::ObjectBuilder, - relative_path::*, - variable::VariableBuilder, + object::ObjectBuilder, relative_path::*, variable::VariableBuilder, AddressSpace, }; /// Events can implement this to populate themselves into the address space @@ -78,22 +75,26 @@ impl Event for BaseEventType { type Err = (); fn is_valid(&self) -> bool { - !self.node_id.is_null() && - !self.event_id.is_null_or_empty() && - !self.event_type.is_null() && - self.severity >= 1 && self.severity <= 1000 + !self.node_id.is_null() + && !self.event_id.is_null_or_empty() + && !self.event_type.is_null() + && self.severity >= 1 + && self.severity <= 1000 } - fn raise(&mut self, address_space: &mut AddressSpace) -> Result - { + fn raise(&mut self, address_space: &mut AddressSpace) -> Result { if self.is_valid() { // create an event object in a folder with the let ns = self.node_id.namespace; let node_id = self.node_id.clone(); - let object_builder = ObjectBuilder::new(&self.node_id, self.browse_name.clone(), self.display_name.clone()) - .organized_by(self.parent_node.clone()) - .has_type_definition(self.event_type.clone()); + let object_builder = ObjectBuilder::new( + &self.node_id, + self.browse_name.clone(), + self.display_name.clone(), + ) + .organized_by(self.parent_node.clone()) + .has_type_definition(self.event_type.clone()); let object_builder = if !self.source_node.is_null() { object_builder.has_event_source(self.source_node.clone()) @@ -103,20 +104,95 @@ impl Event for BaseEventType { object_builder.insert(address_space); // Mandatory properties - self.add_property(&node_id, NodeId::next_numeric(ns), "EventId", "EventId", DataTypeId::ByteString, self.event_id.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "EventType", "EventType", DataTypeId::NodeId, self.event_type.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "SourceNode", "SourceNode", DataTypeId::NodeId, self.source_node.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "SourceName", "SourceName", DataTypeId::String, self.source_name.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "Time", "Time", DataTypeId::UtcTime, self.time.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "ReceiveTime", "ReceiveTime", DataTypeId::UtcTime, self.receive_time.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "Message", "Message", DataTypeId::LocalizedText, self.message.clone(), address_space); - self.add_property(&node_id, NodeId::next_numeric(ns), "Severity", "Severity", DataTypeId::UInt16, self.severity, address_space); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "EventId", + "EventId", + DataTypeId::ByteString, + self.event_id.clone(), + address_space, + ); + self.add_property( + &node_id, + 
NodeId::next_numeric(ns), + "EventType", + "EventType", + DataTypeId::NodeId, + self.event_type.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "SourceNode", + "SourceNode", + DataTypeId::NodeId, + self.source_node.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "SourceName", + "SourceName", + DataTypeId::String, + self.source_name.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "Time", + "Time", + DataTypeId::UtcTime, + self.time.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "ReceiveTime", + "ReceiveTime", + DataTypeId::UtcTime, + self.receive_time.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "Message", + "Message", + DataTypeId::LocalizedText, + self.message.clone(), + address_space, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "Severity", + "Severity", + DataTypeId::UInt16, + self.severity, + address_space, + ); // LocalTime is optional if let Some(ref local_time) = self.local_time { // Serialise to extension object - let local_time = ExtensionObject::from_encodable(ObjectId::TimeZoneDataType_Encoding_DefaultBinary, local_time); - self.add_property(&node_id, NodeId::next_numeric(ns), "LocalTime", "LocalTime", DataTypeId::TimeZoneDataType, local_time, address_space); + let local_time = ExtensionObject::from_encodable( + ObjectId::TimeZoneDataType_Encoding_DefaultBinary, + local_time, + ); + self.add_property( + &node_id, + NodeId::next_numeric(ns), + "LocalTime", + "LocalTime", + DataTypeId::TimeZoneDataType, + local_time, + address_space, + ); } Ok(node_id) @@ -128,23 +204,45 @@ impl Event for BaseEventType { } impl BaseEventType { - pub fn new_now(node_id: R, event_type_id: E, browse_name: S, display_name: T, parent_node: U) -> Self - where R: Into, - E: Into, - S: Into, - T: Into, - U: Into, + pub fn new_now( + node_id: R, + event_type_id: E, + browse_name: S, + display_name: T, + parent_node: U, + ) -> Self + where + R: Into, + E: Into, + S: Into, + T: Into, + U: Into, { let now = DateTime::now(); - Self::new(node_id, event_type_id, browse_name, display_name, parent_node, now) + Self::new( + node_id, + event_type_id, + browse_name, + display_name, + parent_node, + now, + ) } - pub fn new(node_id: R, event_type_id: E, browse_name: S, display_name: T, parent_node: U, time: DateTime) -> Self - where R: Into, - E: Into, - S: Into, - T: Into, - U: Into, + pub fn new( + node_id: R, + event_type_id: E, + browse_name: S, + display_name: T, + parent_node: U, + time: DateTime, + ) -> Self + where + R: Into, + E: Into, + S: Into, + T: Into, + U: Into, { Self { node_id: node_id.into(), @@ -165,27 +263,52 @@ impl BaseEventType { } /// Add a property to the event object - pub fn add_property(&mut self, event_id: &NodeId, property_id: T, browse_name: R, display_name: S, data_type: U, value: V, address_space: &mut AddressSpace) - where T: Into, - R: Into, - S: Into, - U: Into, - V: Into + pub fn add_property( + &mut self, + event_id: &NodeId, + property_id: T, + browse_name: R, + display_name: S, + data_type: U, + value: V, + address_space: &mut AddressSpace, + ) where + T: Into, + R: Into, + S: Into, + U: Into, + V: Into, { let display_name = display_name.into(); let value = value.into(); self.properties.push((display_name.clone(), value.clone())); - Self::do_add_property(event_id, property_id, browse_name, display_name, data_type, value, 
address_space) + Self::do_add_property( + event_id, + property_id, + browse_name, + display_name, + data_type, + value, + address_space, + ) } /// Helper function inserts a property for the event - fn do_add_property(event_id: &NodeId, property_id: T, browse_name: R, display_name: S, data_type: U, value: V, address_space: &mut AddressSpace) - where T: Into, - R: Into, - S: Into, - U: Into, - V: Into + fn do_add_property( + event_id: &NodeId, + property_id: T, + browse_name: R, + display_name: S, + data_type: U, + value: V, + address_space: &mut AddressSpace, + ) where + T: Into, + R: Into, + S: Into, + U: Into, + V: Into, { VariableBuilder::new(&property_id.into(), browse_name, display_name) .property_of(event_id.clone()) @@ -195,17 +318,26 @@ impl BaseEventType { .insert(address_space); } - pub fn message(mut self, message: T) -> Self where T: Into { + pub fn message(mut self, message: T) -> Self + where + T: Into, + { self.message = message.into(); self } - pub fn source_node(mut self, source_node: T) -> Self where T: Into { + pub fn source_node(mut self, source_node: T) -> Self + where + T: Into, + { self.source_node = source_node.into(); self } - pub fn source_name(mut self, source_name: T) -> Self where T: Into { + pub fn source_name(mut self, source_name: T) -> Self + where + T: Into, + { self.source_name = source_name.into(); self } @@ -235,27 +367,53 @@ impl BaseEventType { macro_rules! base_event_impl { ( $event:ident, $base:ident ) => { impl $event { - pub fn add_property(&mut self, event_id: &NodeId, property_id: T, browse_name: R, display_name: S, data_type: U, value: V, address_space: &mut AddressSpace) - where T: Into, - R: Into, - S: Into, - U: Into, - V: Into + pub fn add_property( + &mut self, + event_id: &NodeId, + property_id: T, + browse_name: R, + display_name: S, + data_type: U, + value: V, + address_space: &mut AddressSpace, + ) where + T: Into, + R: Into, + S: Into, + U: Into, + V: Into, { - self.$base.add_property(event_id, property_id, browse_name, display_name, data_type, value, address_space); + self.$base.add_property( + event_id, + property_id, + browse_name, + display_name, + data_type, + value, + address_space, + ); } - pub fn message(mut self, message: T) -> $event where T: Into { + pub fn message(mut self, message: T) -> $event + where + T: Into, + { self.$base = self.$base.message(message); self } - pub fn source_node(mut self, source_node: T) -> $event where T: Into { + pub fn source_node(mut self, source_node: T) -> $event + where + T: Into, + { self.$base = self.$base.source_node(source_node); self } - pub fn source_name(mut self, source_name: T) -> $event where T: Into { + pub fn source_name(mut self, source_name: T) -> $event + where + T: Into, + { self.$base = self.$base.source_name(source_name); self } @@ -275,16 +433,23 @@ macro_rules! 
base_event_impl { self } } - } + }; } fn event_source_node(event_id: &NodeId, address_space: &AddressSpace) -> Option { - if let Ok(event_time_node) = find_node_from_browse_path(address_space, event_id, &["SourceNode".into()]) { - if let Some(value) = event_time_node.as_node().get_attribute(TimestampsToReturn::Neither, AttributeId::Value, NumericRange::None, &QualifiedName::null()) { + if let Ok(event_time_node) = + find_node_from_browse_path(address_space, event_id, &["SourceNode".into()]) + { + if let Some(value) = event_time_node.as_node().get_attribute( + TimestampsToReturn::Neither, + AttributeId::Value, + NumericRange::None, + &QualifiedName::null(), + ) { if let Some(value) = value.value { match value { Variant::NodeId(node_id) => Some(*node_id), - _ => None + _ => None, } } else { None @@ -298,12 +463,19 @@ fn event_source_node(event_id: &NodeId, address_space: &AddressSpace) -> Option< } fn event_time(event_id: &NodeId, address_space: &AddressSpace) -> Option { - if let Ok(event_time_node) = find_node_from_browse_path(address_space, event_id, &["Time".into()]) { - if let Some(value) = event_time_node.as_node().get_attribute(TimestampsToReturn::Neither, AttributeId::Value, NumericRange::None, &QualifiedName::null()) { + if let Ok(event_time_node) = + find_node_from_browse_path(address_space, event_id, &["Time".into()]) + { + if let Some(value) = event_time_node.as_node().get_attribute( + TimestampsToReturn::Neither, + AttributeId::Value, + NumericRange::None, + &QualifiedName::null(), + ) { if let Some(value) = value.value { match value { Variant::DateTime(date_time) => Some(*date_time), - _ => None + _ => None, } } else { None @@ -316,16 +488,23 @@ fn event_time(event_id: &NodeId, address_space: &AddressSpace) -> Option(source_object_id: T, event_type_id: R, address_space: &AddressSpace, time_predicate: F) -> Option> - where T: Into, - R: Into, - F: Fn(&DateTimeUtc) -> bool +pub fn filter_events( + source_object_id: T, + event_type_id: R, + address_space: &AddressSpace, + time_predicate: F, +) -> Option> +where + T: Into, + R: Into, + F: Fn(&DateTimeUtc) -> bool, { let event_type_id = event_type_id.into(); let source_object_id = source_object_id.into(); // Find events of type event_type_id if let Some(events) = address_space.find_objects_by_type(event_type_id, true) { - let event_ids = events.iter() + let event_ids = events + .iter() .filter(move |event_id| { let mut filter = false; // Browse the relative path for the "Time" variable @@ -342,17 +521,32 @@ pub fn filter_events(source_object_id: T, event_type_id: R, address_spa }) .cloned() .collect::>(); - if event_ids.is_empty() { None } else { Some(event_ids) } + if event_ids.is_empty() { + None + } else { + Some(event_ids) + } } else { None } } -pub fn purge_events(source_object_id: T, event_type_id: R, address_space: &mut AddressSpace, happened_before: &DateTimeUtc) -> usize - where T: Into, - R: Into +pub fn purge_events( + source_object_id: T, + event_type_id: R, + address_space: &mut AddressSpace, + happened_before: &DateTimeUtc, +) -> usize +where + T: Into, + R: Into, { - if let Some(events) = filter_events(source_object_id, event_type_id, address_space, move |event_time| event_time < happened_before) { + if let Some(events) = filter_events( + source_object_id, + event_type_id, + address_space, + move |event_time| event_time < happened_before, + ) { // Delete these events from the address space info!("Deleting some events from the address space"); let len = events.len(); @@ -367,10 +561,20 @@ pub fn 
purge_events(source_object_id: T, event_type_id: R, address_space: } /// Searches for events of the specified event type which reference the source object -pub fn events_for_object(source_object_id: T, address_space: &AddressSpace, happened_since: &DateTimeUtc) -> Option> - where T: Into +pub fn events_for_object( + source_object_id: T, + address_space: &AddressSpace, + happened_since: &DateTimeUtc, +) -> Option> +where + T: Into, { - filter_events(source_object_id, ObjectTypeId::BaseEventType, address_space, move |event_time| event_time >= happened_since) + filter_events( + source_object_id, + ObjectTypeId::BaseEventType, + address_space, + move |event_time| event_time >= happened_since, + ) } #[test] @@ -380,11 +584,21 @@ fn test_event_source_node() { // Raise an event let event_id = NodeId::next_numeric(ns); let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new(&event_id, event_type_id, "Event1", "", NodeId::objects_folder_id(), DateTime::now()) - .source_node(ObjectId::Server_ServerCapabilities); + let mut event = BaseEventType::new( + &event_id, + event_type_id, + "Event1", + "", + NodeId::objects_folder_id(), + DateTime::now(), + ) + .source_node(ObjectId::Server_ServerCapabilities); assert!(event.raise(&mut address_space).is_ok()); // Check that the helper fn returns the expected source node - assert_eq!(event_source_node(&event_id, &address_space).unwrap(), ObjectId::Server_ServerCapabilities.into()); + assert_eq!( + event_source_node(&event_id, &address_space).unwrap(), + ObjectId::Server_ServerCapabilities.into() + ); } #[test] @@ -394,15 +608,24 @@ fn test_event_time() { // Raise an event let event_id = NodeId::next_numeric(ns); let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new(&event_id, event_type_id, "Event1", "", NodeId::objects_folder_id(), DateTime::now()) - .source_node(ObjectId::Server_ServerCapabilities); + let mut event = BaseEventType::new( + &event_id, + event_type_id, + "Event1", + "", + NodeId::objects_folder_id(), + DateTime::now(), + ) + .source_node(ObjectId::Server_ServerCapabilities); let expected_time = event.time.clone(); assert!(event.raise(&mut address_space).is_ok()); // Check that the helper fn returns the expected source node - assert_eq!(event_time(&event_id, &address_space).unwrap(), expected_time); + assert_eq!( + event_time(&event_id, &address_space).unwrap(), + expected_time + ); } - #[test] fn test_events_for_object() { let mut address_space = AddressSpace::new(); @@ -412,12 +635,24 @@ fn test_events_for_object() { let happened_since = chrono::Utc::now(); let event_id = NodeId::next_numeric(ns); let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new(&event_id, event_type_id, "Event1", "", NodeId::objects_folder_id(), DateTime::now()) - .source_node(ObjectId::Server_ServerCapabilities); + let mut event = BaseEventType::new( + &event_id, + event_type_id, + "Event1", + "", + NodeId::objects_folder_id(), + DateTime::now(), + ) + .source_node(ObjectId::Server_ServerCapabilities); assert!(event.raise(&mut address_space).is_ok()); // Check that event can be found - let mut events = events_for_object(ObjectId::Server_ServerCapabilities, &address_space, &happened_since).unwrap(); + let mut events = events_for_object( + ObjectId::Server_ServerCapabilities, + &address_space, + &happened_since, + ) + .unwrap(); assert_eq!(events.len(), 1); assert_eq!(events.pop().unwrap(), event_id); } @@ -444,7 +679,7 @@ fn test_purge_events() { // test but that does not 
matter. let first_node_id = match NodeId::next_numeric(ns).identifier { Identifier::Numeric(i) => i + 1, - _ => panic!() + _ => panic!(), }; let source_node = ObjectId::Server_ServerCapabilities; @@ -459,8 +694,15 @@ fn test_purge_events() { (0..10).for_each(|i| { let event_id = NodeId::new(ns, format!("Event{}", i)); let event_name = format!("Event {}", i); - let mut event = BaseEventType::new(&event_id, event_type_id, event_name, "", NodeId::objects_folder_id(), DateTime::from(time)) - .source_node(source_node); + let mut event = BaseEventType::new( + &event_id, + event_type_id, + event_name, + "", + NodeId::objects_folder_id(), + DateTime::from(time), + ) + .source_node(source_node); assert!(event.raise(&mut address_space).is_ok()); // The first 5 events will be purged, so note the last node id here because none of the @@ -468,7 +710,7 @@ fn test_purge_events() { if i == 4 { last_purged_node_id = match NodeId::next_numeric(ns).identifier { Identifier::Numeric(i) => i, - _ => panic!() + _ => panic!(), }; } @@ -481,7 +723,15 @@ fn test_purge_events() { // Purge all events up to halfway let happened_before = start_time + chrono::Duration::minutes(25); - assert_eq!(purge_events(source_node, ObjectTypeId::BaseEventType, &mut address_space, &happened_before), 5); + assert_eq!( + purge_events( + source_node, + ObjectTypeId::BaseEventType, + &mut address_space, + &happened_before + ), + 5 + ); // Should have only 5 events left let events = events_for_object(source_node, &address_space, &start_time).unwrap(); diff --git a/server/src/events/event_filter.rs b/server/src/events/event_filter.rs index 6ba28bf2a..d61771b65 100644 --- a/server/src/events/event_filter.rs +++ b/server/src/events/event_filter.rs @@ -5,31 +5,33 @@ use std::convert::TryFrom; use opcua_types::{ - AttributeId, DateTimeUtc, NodeId, operand::Operand, + operand::Operand, service_types::{ - ContentFilter, ContentFilterElementResult, ContentFilterResult, EventFieldList, EventFilter, - EventFilterResult, FilterOperator, SimpleAttributeOperand, + ContentFilter, ContentFilterElementResult, ContentFilterResult, EventFieldList, + EventFilter, EventFilterResult, FilterOperator, SimpleAttributeOperand, }, status_code::StatusCode, - Variant, + AttributeId, DateTimeUtc, NodeId, Variant, }; use crate::{ - address_space::{ - address_space::AddressSpace, - node::NodeType, - relative_path::*, - }, + address_space::{address_space::AddressSpace, node::NodeType, relative_path::*}, events::event::events_for_object, events::operator, }; /// This validates the event filter as best it can to make sure it doesn't contain nonsense. -pub fn validate(event_filter: &EventFilter, address_space: &AddressSpace) -> Result { +pub fn validate( + event_filter: &EventFilter, + address_space: &AddressSpace, +) -> Result { let select_clause_results = if let Some(ref select_clauses) = event_filter.select_clauses { - Some(select_clauses.iter().map(|clause| { - validate_select_clause(clause, address_space) - }).collect()) + Some( + select_clauses + .iter() + .map(|clause| validate_select_clause(clause, address_space)) + .collect(), + ) } else { None }; @@ -42,12 +44,20 @@ pub fn validate(event_filter: &EventFilter, address_space: &AddressSpace) -> Res } /// Evaluate the event filter and see if it triggers. 
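// [Editorial note, not part of the patch] The `evaluate` function documented by the
// doc comment above follows in the next hunk. As its reformatted body shows, it
// gathers the events raised against `object_id` since `happened_since` via
// `events_for_object`, keeps only those whose where clause evaluates to
// `Variant::Boolean(true)` through `evaluate_where_clause`, and then maps each
// select clause to a value with `operator::value_of_simple_attribute`, producing one
// `EventFieldList` per matching event tagged with the caller's `client_handle`.
// This summary is inferred from the diff itself rather than from external documentation.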
-pub fn evaluate(object_id: &NodeId, event_filter: &EventFilter, address_space: &AddressSpace, happened_since: &DateTimeUtc, client_handle: u32) -> Option> -{ +pub fn evaluate( + object_id: &NodeId, + event_filter: &EventFilter, + address_space: &AddressSpace, + happened_since: &DateTimeUtc, + client_handle: u32, +) -> Option> { if let Some(events) = events_for_object(object_id, address_space, happened_since) { - let event_fields = events.iter() + let event_fields = events + .iter() .filter(|event_id| { - if let Ok(result) = evaluate_where_clause(event_id, &event_filter.where_clause, address_space) { + if let Ok(result) = + evaluate_where_clause(event_id, &event_filter.where_clause, address_space) + { result == Variant::Boolean(true) } else { false @@ -56,9 +66,14 @@ pub fn evaluate(object_id: &NodeId, event_filter: &EventFilter, address_space: & .map(|event_id| { // Produce an event notification list from the select clauses. let event_fields = if let Some(ref select_clauses) = event_filter.select_clauses { - Some(select_clauses.iter().map(|v| { - operator::value_of_simple_attribute(event_id, v, address_space) - }).collect()) + Some( + select_clauses + .iter() + .map(|v| { + operator::value_of_simple_attribute(event_id, v, address_space) + }) + .collect(), + ) } else { None }; @@ -66,7 +81,8 @@ pub fn evaluate(object_id: &NodeId, event_filter: &EventFilter, address_space: & client_handle, event_fields, } - }).collect::>(); + }) + .collect::>(); if event_fields.is_empty() { None } else { @@ -78,14 +94,24 @@ pub fn evaluate(object_id: &NodeId, event_filter: &EventFilter, address_space: & } /// Evaluates a where clause which is a tree of conditionals -pub(crate) fn evaluate_where_clause(object_id: &NodeId, where_clause: &ContentFilter, address_space: &AddressSpace) -> Result { +pub(crate) fn evaluate_where_clause( + object_id: &NodeId, + where_clause: &ContentFilter, + address_space: &AddressSpace, +) -> Result { // Clause is meant to have been validated before now so this code is not as stringent and makes some expectations. if let Some(ref elements) = where_clause.elements { if !elements.is_empty() { use std::collections::HashSet; let mut used_elements = HashSet::new(); used_elements.insert(0); - let result = operator::evaluate(object_id, &elements[0], &mut used_elements, elements, address_space)?; + let result = operator::evaluate( + object_id, + &elements[0], + &mut used_elements, + elements, + address_space, + )?; Ok(result) } else { Ok(true.into()) @@ -95,7 +121,10 @@ pub(crate) fn evaluate_where_clause(object_id: &NodeId, where_clause: &ContentFi } } -fn validate_select_clause(clause: &SimpleAttributeOperand, address_space: &AddressSpace) -> StatusCode { +fn validate_select_clause( + clause: &SimpleAttributeOperand, + address_space: &AddressSpace, +) -> StatusCode { // The SimpleAttributeOperand structure is used in the selectClauses to select the value to return // if an Event meets the criteria specified by the whereClause. 
A null value is returned in the corresponding // event field in the publish response if the selected field is not part of the event or an @@ -107,7 +136,9 @@ fn validate_select_clause(clause: &SimpleAttributeOperand, address_space: &Addre StatusCode::BadIndexRangeInvalid } else if let Some(ref browse_path) = clause.browse_path { // Validate that the browse paths seem okay relative to the object type definition in the clause - if let Ok(node) = find_node_from_browse_path(&address_space, &clause.type_definition_id, browse_path) { + if let Ok(node) = + find_node_from_browse_path(&address_space, &clause.type_definition_id, browse_path) + { // Validate the attribute id. Per spec: // // The SimpleAttributeOperand allows the client to specify any attribute; however the server @@ -146,7 +177,10 @@ fn validate_select_clause(clause: &SimpleAttributeOperand, address_space: &Addre } } -fn validate_where_clause(where_clause: &ContentFilter, address_space: &AddressSpace) -> Result { +fn validate_where_clause( + where_clause: &ContentFilter, + address_space: &AddressSpace, +) -> Result { // The ContentFilter structure defines a collection of elements that define filtering criteria. // Each element in the collection describes an operator and an array of operands to be used by // the operator. The operators that can be used in a ContentFilter are described in Table 119. @@ -279,7 +313,6 @@ fn validate_where_clause(where_clause: &ContentFilter, address_space: &AddressSp } } - #[test] fn validate_where_clause_test() { use opcua_types::service_types::ContentFilterElement; @@ -287,15 +320,16 @@ fn validate_where_clause_test() { let address_space = AddressSpace::new(); { - let where_clause = ContentFilter { - elements: None - }; + let where_clause = ContentFilter { elements: None }; // check for at least one filter operand let result = validate_where_clause(&where_clause, &address_space); - assert_eq!(result.unwrap(), ContentFilterResult { - element_results: None, - element_diagnostic_infos: None, - }); + assert_eq!( + result.unwrap(), + ContentFilterResult { + element_results: None, + element_diagnostic_infos: None, + } + ); } // Make a where clause where every single operator is included but each has the wrong number of operands. 
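// [Editorial aside, not part of the patch] A minimal sketch of how the where-clause
// machinery reformatted above could be exercised. It assumes the code sits in
// event_filter.rs so the crate-internal `evaluate_where_clause` is in scope, and that
// an Equals comparison of two literal operands resolves to Boolean(true); the types
// and constructors are the ones already used in the surrounding hunks, and the
// `clause_matches` helper itself is hypothetical. The `validate_where_clause_test`
// function continues in the hunk below.
use opcua_types::{
    operand::Operand,
    service_types::{ContentFilter, ContentFilterElement, FilterOperator},
    NodeId, Variant,
};

use crate::address_space::AddressSpace;

fn clause_matches(event_id: &NodeId, address_space: &AddressSpace) -> bool {
    // A single-element where clause: Equals(10, 10), which always holds.
    let where_clause = ContentFilter {
        elements: Some(vec![ContentFilterElement::from((
            FilterOperator::Equals,
            vec![Operand::literal(10), Operand::literal(10)],
        ))]),
    };
    // evaluate_where_clause returns the Variant produced by the root element;
    // Boolean(true) means the filter passes for the event identified by event_id.
    evaluate_where_clause(event_id, &where_clause, address_space)
        .map(|v| v == Variant::Boolean(true))
        .unwrap_or(false)
}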
@@ -305,43 +339,63 @@ fn validate_where_clause_test() { elements: Some(vec![ ContentFilterElement::from((FilterOperator::Equals, vec![Operand::literal(10)])), ContentFilterElement::from((FilterOperator::IsNull, vec![])), - ContentFilterElement::from((FilterOperator::GreaterThan, vec![Operand::literal(10)])), + ContentFilterElement::from(( + FilterOperator::GreaterThan, + vec![Operand::literal(10)], + )), ContentFilterElement::from((FilterOperator::LessThan, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::GreaterThanOrEqual, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::LessThanOrEqual, vec![Operand::literal(10)])), + ContentFilterElement::from(( + FilterOperator::GreaterThanOrEqual, + vec![Operand::literal(10)], + )), + ContentFilterElement::from(( + FilterOperator::LessThanOrEqual, + vec![Operand::literal(10)], + )), ContentFilterElement::from((FilterOperator::Like, vec![Operand::literal(10)])), ContentFilterElement::from((FilterOperator::Not, vec![])), - ContentFilterElement::from((FilterOperator::Between, vec![Operand::literal(10), Operand::literal(20)])), + ContentFilterElement::from(( + FilterOperator::Between, + vec![Operand::literal(10), Operand::literal(20)], + )), ContentFilterElement::from((FilterOperator::InList, vec![Operand::literal(10)])), ContentFilterElement::from((FilterOperator::And, vec![Operand::literal(10)])), ContentFilterElement::from((FilterOperator::Or, vec![Operand::literal(10)])), ContentFilterElement::from((FilterOperator::Cast, vec![Operand::literal(10)])), - ContentFilterElement::from((FilterOperator::BitwiseAnd, vec![Operand::literal(10)])), + ContentFilterElement::from(( + FilterOperator::BitwiseAnd, + vec![Operand::literal(10)], + )), ContentFilterElement::from((FilterOperator::BitwiseOr, vec![Operand::literal(10)])), ContentFilterElement::from((FilterOperator::Like, vec![Operand::literal(10)])), - ]) + ]), }; // Check for less than required number of operands let result = validate_where_clause(&where_clause, &address_space).unwrap(); - result.element_results.unwrap().iter().for_each(|e| { - assert_eq!(e.status_code, StatusCode::BadFilterOperandCountMismatch) - }); + result + .element_results + .unwrap() + .iter() + .for_each(|e| assert_eq!(e.status_code, StatusCode::BadFilterOperandCountMismatch)); } // check for filter operator invalid, by giving it a bogus extension object for an element { - use opcua_types::{ExtensionObject, service_types::ContentFilterElement}; + use opcua_types::{service_types::ContentFilterElement, ExtensionObject}; let bad_operator = ExtensionObject::null(); let where_clause = ContentFilter { elements: Some(vec![ContentFilterElement { filter_operator: FilterOperator::IsNull, filter_operands: Some(vec![bad_operator]), - }]) + }]), }; let result = validate_where_clause(&where_clause, &address_space).unwrap(); let element_results = result.element_results.unwrap(); assert_eq!(element_results.len(), 1); - assert_eq!(element_results[0].status_code, StatusCode::BadFilterOperatorInvalid); + assert_eq!( + element_results[0].status_code, + StatusCode::BadFilterOperatorInvalid + ); } // TODO check operands are compatible with operator diff --git a/server/src/events/mod.rs b/server/src/events/mod.rs index 5c66e8d26..beec43b48 100644 --- a/server/src/events/mod.rs +++ b/server/src/events/mod.rs @@ -2,9 +2,9 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -pub(crate) mod operator; pub mod event_filter; +pub(crate) mod operator; #[macro_use] pub mod event; 
#[macro_use] -pub mod audit; \ No newline at end of file +pub mod audit; diff --git a/server/src/events/operator.rs b/server/src/events/operator.rs index eb11e5d6e..a339331e2 100644 --- a/server/src/events/operator.rs +++ b/server/src/events/operator.rs @@ -9,23 +9,24 @@ use std::convert::TryFrom; use regex::Regex; use opcua_types::{ - AttributeId, ExtensionObject, NodeId, NumericRange, operand::Operand, QualifiedName, + operand::Operand, service_types::{ContentFilterElement, FilterOperator, SimpleAttributeOperand}, - status_code::StatusCode, TimestampsToReturn, - Variant, + status_code::StatusCode, + AttributeId, ExtensionObject, NodeId, NumericRange, QualifiedName, TimestampsToReturn, Variant, VariantTypeId, }; use crate::address_space::{ - AddressSpace, node::{NodeBase, NodeType}, relative_path::find_node_from_browse_path, + AddressSpace, }; /// Turns a list of operands inside extension objects to their analogous Operand objects fn make_filter_operands(filter_operands: &[ExtensionObject]) -> Result, StatusCode> { // If any operand cannot be converted then the whole action is in error - let operands = filter_operands.iter() + let operands = filter_operands + .iter() .map(|v| Operand::try_from(v)) .take_while(|v| v.is_ok()) .map(|v| v.unwrap()) @@ -41,7 +42,13 @@ fn make_filter_operands(filter_operands: &[ExtensionObject]) -> Result, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn evaluate( + object_id: &NodeId, + element: &ContentFilterElement, + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { if let Some(ref filter_operands) = element.filter_operands { if filter_operands.len() > 0 { // Turn ExtensionObjects into Operands here. This should be externalised even further so it @@ -49,22 +56,112 @@ pub(crate) fn evaluate(object_id: &NodeId, element: &ContentFilterElement, used_ // which has the operands . 
let operands = make_filter_operands(filter_operands)?; match element.filter_operator { - FilterOperator::Equals => eq(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::IsNull => is_null(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::GreaterThan => gt(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::LessThan => lt(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::GreaterThanOrEqual => gte(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::LessThanOrEqual => lte(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::Like => like(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::Not => not(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::Between => between(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::InList => in_list(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::And => and(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::Or => or(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::Cast => cast(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::BitwiseAnd => bitwise_and(object_id, &operands[..], used_elements, elements, address_space), - FilterOperator::BitwiseOr => bitwise_or(object_id, &operands[..], used_elements, elements, address_space), - _ => Err(StatusCode::BadFilterOperatorUnsupported) + FilterOperator::Equals => eq( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::IsNull => is_null( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::GreaterThan => gt( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::LessThan => lt( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::GreaterThanOrEqual => gte( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::LessThanOrEqual => lte( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::Like => like( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::Not => not( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::Between => between( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::InList => in_list( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::And => and( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::Or => or( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::Cast => cast( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::BitwiseAnd => bitwise_and( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + FilterOperator::BitwiseOr => bitwise_or( + object_id, + &operands[..], + used_elements, + elements, + address_space, + ), + _ => Err(StatusCode::BadFilterOperatorUnsupported), } } else { // All operators need at least one operand @@ -79,12 +176,23 
@@ pub(crate) fn evaluate(object_id: &NodeId, element: &ContentFilterElement, used_ } /// Get the value of something and convert to the expected type. -fn value_as(object_id: &NodeId, as_type: VariantTypeId, operand: &Operand, used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +fn value_as( + object_id: &NodeId, + as_type: VariantTypeId, + operand: &Operand, + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { let v = value_of(object_id, operand, used_elements, elements, address_space)?; Ok(v.convert(as_type)) } -pub(crate) fn value_of_simple_attribute(object_id: &NodeId, o: &SimpleAttributeOperand, address_space: &AddressSpace) -> Variant { +pub(crate) fn value_of_simple_attribute( + object_id: &NodeId, + o: &SimpleAttributeOperand, + address_space: &AddressSpace, +) -> Variant { // Get the Object / Variable by browse path if let Some(ref browse_path) = o.browse_path { // TODO o.data_type is ignored but be used to restrict the browse @@ -97,26 +205,43 @@ pub(crate) fn value_of_simple_attribute(object_id: &NodeId, o: &SimpleAttributeO if o.attribute_id == AttributeId::NodeId as u32 { node.node_id().into() } else { - error!("value_of, unsupported attribute id {} on object", o.attribute_id); + error!( + "value_of, unsupported attribute id {} on object", + o.attribute_id + ); Variant::Empty } } NodeType::Variable(ref node) => { if o.attribute_id == AttributeId::Value as u32 { - if let Some(ref value) = node.value(TimestampsToReturn::Neither, NumericRange::None, &QualifiedName::null(), 0.0).value { + if let Some(ref value) = node + .value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0, + ) + .value + { value.clone() } else { Variant::Empty } } else { - error!("value_of, unsupported attribute id {} on Variable", o.attribute_id); + error!( + "value_of, unsupported attribute id {} on Variable", + o.attribute_id + ); Variant::Empty } } - _ => Variant::Empty + _ => Variant::Empty, } } else { - error!("value_of, cannot find node from browse path {:?}", browse_path); + error!( + "value_of, cannot find node from browse path {:?}", + browse_path + ); Variant::Empty } } else { @@ -126,7 +251,13 @@ pub(crate) fn value_of_simple_attribute(object_id: &NodeId, o: &SimpleAttributeO } // This function fetches the value of the operand. 
-pub(crate) fn value_of(object_id: &NodeId, operand: &Operand, used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn value_of( + object_id: &NodeId, + operand: &Operand, + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { match operand { Operand::ElementOperand(ref o) => { if used_elements.contains(&o.index) { @@ -134,14 +265,18 @@ pub(crate) fn value_of(object_id: &NodeId, operand: &Operand, used_elements: &mu Err(StatusCode::BadFilterOperandInvalid) } else { used_elements.insert(o.index); - let result = evaluate(object_id, &elements[o.index as usize], used_elements, elements, address_space); + let result = evaluate( + object_id, + &elements[o.index as usize], + used_elements, + elements, + address_space, + ); used_elements.remove(&o.index); result } } - Operand::LiteralOperand(ref o) => { - Ok(o.value.clone()) - } + Operand::LiteralOperand(ref o) => Ok(o.value.clone()), Operand::SimpleAttributeOperand(ref o) => { Ok(value_of_simple_attribute(object_id, o, address_space)) } @@ -167,8 +302,20 @@ fn convert(v1: Variant, v2: Variant) -> (Variant, Variant) { } // Tests if the operand is null (empty). TRUE if operand[0] is a null value. -pub(crate) fn is_null(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let v1 = value_of(object_id, &operands[0], used_elements, elements, address_space)?; +pub(crate) fn is_null( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let v1 = value_of( + object_id, + &operands[0], + used_elements, + elements, + address_space, + )?; Ok((Variant::Empty == v1).into()) } @@ -187,33 +334,36 @@ enum ComparisonResult { } macro_rules! compare_values { - ( $v1: expr, $v2: expr, $variant_type: ident ) => { - { - if let Variant::$variant_type(v1) = $v1 { - if let Variant::$variant_type(v2) = $v2 { - if v1 < v2 { - ComparisonResult::LessThan - } - else if v1 == v2 { - ComparisonResult::Equals - } - else { - ComparisonResult::GreaterThan - } + ( $v1: expr, $v2: expr, $variant_type: ident ) => {{ + if let Variant::$variant_type(v1) = $v1 { + if let Variant::$variant_type(v2) = $v2 { + if v1 < v2 { + ComparisonResult::LessThan + } else if v1 == v2 { + ComparisonResult::Equals } else { - panic!(); + ComparisonResult::GreaterThan } } else { panic!(); } + } else { + panic!(); } - } + }}; } /// Compares to operands by taking their numeric value, comparing the value and saying /// which of the two is less than, greater than or equal. If the values cannot be compared, the /// result is an error. 
-fn compare_operands(object_id: &NodeId, o1: &Operand, o2: &Operand, used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +fn compare_operands( + object_id: &NodeId, + o1: &Operand, + o2: &Operand, + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { let v1 = value_of(object_id, o1, used_elements, elements, address_space)?; let v2 = value_of(object_id, o2, used_elements, elements, address_space)?; // Try and convert one value or the other to the same type @@ -229,12 +379,14 @@ fn compare_operands(object_id: &NodeId, o1: &Operand, o2: &Operand, used_element VariantTypeId::UInt64 => compare_values!(v1, v2, UInt64), VariantTypeId::Double => compare_values!(v1, v2, Double), VariantTypeId::Float => compare_values!(v1, v2, Float), - VariantTypeId::Boolean => if v1 == v2 { - ComparisonResult::Equals - } else { - ComparisonResult::NotEquals + VariantTypeId::Boolean => { + if v1 == v2 { + ComparisonResult::Equals + } else { + ComparisonResult::NotEquals + } } - _ => ComparisonResult::Error + _ => ComparisonResult::Error, }; Ok(result) } @@ -243,32 +395,97 @@ fn compare_operands(object_id: &NodeId, o1: &Operand, o2: &Operand, used_element // the system shall perform any implicit conversion to a common type. This operator resolves to // FALSE if no implicit conversion is available and the operands are of different types. This // operator returns FALSE if the implicit conversion fails. -pub(crate) fn eq(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let result = compare_operands(object_id, &operands[0], &operands[1], used_elements, elements, address_space)?; +pub(crate) fn eq( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let result = compare_operands( + object_id, + &operands[0], + &operands[1], + used_elements, + elements, + address_space, + )?; Ok((result == ComparisonResult::Equals).into()) } // Check if operand[0] is greater than operand[1] -pub(crate) fn gt(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let result = compare_operands(object_id, &operands[0], &operands[1], used_elements, elements, address_space)?; +pub(crate) fn gt( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let result = compare_operands( + object_id, + &operands[0], + &operands[1], + used_elements, + elements, + address_space, + )?; Ok((result == ComparisonResult::GreaterThan).into()) } // Check if operand[0] is less than operand[1] -pub(crate) fn lt(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let result = compare_operands(object_id, &operands[0], &operands[1], used_elements, elements, address_space)?; +pub(crate) fn lt( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let result = compare_operands( + object_id, + &operands[0], + &operands[1], + used_elements, + elements, + address_space, + )?; Ok((result == ComparisonResult::LessThan).into()) } // Check if 
operand[0] is greater than or equal to operand[1] -pub(crate) fn gte(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let result = compare_operands(object_id, &operands[0], &operands[1], used_elements, elements, address_space)?; +pub(crate) fn gte( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let result = compare_operands( + object_id, + &operands[0], + &operands[1], + used_elements, + elements, + address_space, + )?; Ok((result == ComparisonResult::GreaterThan || result == ComparisonResult::Equals).into()) } // Check if operand[0] is less than or equal to operand[1] -pub(crate) fn lte(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let result = compare_operands(object_id, &operands[0], &operands[1], used_elements, elements, address_space)?; +pub(crate) fn lte( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let result = compare_operands( + object_id, + &operands[0], + &operands[1], + used_elements, + elements, + address_space, + )?; Ok((result == ComparisonResult::LessThan || result == ComparisonResult::Equals).into()) } @@ -360,13 +577,31 @@ fn like_to_regex_tests() { compare_regex(like_to_regex("%").unwrap(), Regex::new("^.*$").unwrap()); compare_regex(like_to_regex("[%]").unwrap(), Regex::new("^[%]$").unwrap()); compare_regex(like_to_regex("[_]").unwrap(), Regex::new("^[_]$").unwrap()); - compare_regex(like_to_regex(r"[\]]").unwrap(), Regex::new(r"^[\]]$").unwrap()); - compare_regex(like_to_regex("[$().+*?]").unwrap(), Regex::new(r"^[\$\(\)\.\+\*\?]$").unwrap()); + compare_regex( + like_to_regex(r"[\]]").unwrap(), + Regex::new(r"^[\]]$").unwrap(), + ); + compare_regex( + like_to_regex("[$().+*?]").unwrap(), + Regex::new(r"^[\$\(\)\.\+\*\?]$").unwrap(), + ); compare_regex(like_to_regex("_").unwrap(), Regex::new("^?$").unwrap()); - compare_regex(like_to_regex("[a-z]").unwrap(), Regex::new("^[a-z]$").unwrap()); - compare_regex(like_to_regex("[abc]").unwrap(), Regex::new("^[abc]$").unwrap()); - compare_regex(like_to_regex(r"\[\]").unwrap(), Regex::new(r"^\[\]$").unwrap()); - compare_regex(like_to_regex("[^0-9]").unwrap(), Regex::new("^[^0-9]$").unwrap()); + compare_regex( + like_to_regex("[a-z]").unwrap(), + Regex::new("^[a-z]$").unwrap(), + ); + compare_regex( + like_to_regex("[abc]").unwrap(), + Regex::new("^[abc]$").unwrap(), + ); + compare_regex( + like_to_regex(r"\[\]").unwrap(), + Regex::new(r"^\[\]$").unwrap(), + ); + compare_regex( + like_to_regex("[^0-9]").unwrap(), + Regex::new("^[^0-9]$").unwrap(), + ); // Some samples from OPC UA part 4 let re = like_to_regex("Th[ia][ts]%").unwrap(); @@ -399,7 +634,13 @@ fn like_to_regex_tests() { } // Check if operand[0] is matches the pattern defined by operand[1]. -pub(crate) fn like(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn like( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // If 0 matches a pattern in 1. 
See table 117 // // 0 and 1 are operands that resolve to a string @@ -412,8 +653,22 @@ pub(crate) fn like(object_id: &NodeId, operands: &[Operand], used_elements: &mut // [] Match any single character in a list // [^] Not matching any single character in a list - let v1 = value_as(object_id, VariantTypeId::String, &operands[0], used_elements, elements, address_space)?; - let v2 = value_as(object_id, VariantTypeId::String, &operands[1], used_elements, elements, address_space)?; + let v1 = value_as( + object_id, + VariantTypeId::String, + &operands[0], + used_elements, + elements, + address_space, + )?; + let v2 = value_as( + object_id, + VariantTypeId::String, + &operands[1], + used_elements, + elements, + address_space, + )?; let result = if let Variant::String(v1) = v1 { if let Variant::String(v2) = v2 { @@ -433,11 +688,24 @@ pub(crate) fn like(object_id: &NodeId, operands: &[Operand], used_elements: &mut } // TRUE if operand[0] is FALSE. -pub(crate) fn not(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn not( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // operand[0] resolves to a boolean // TRUE if 0 is FALSE // If resolve fails, result is NULL - let v = value_as(object_id, VariantTypeId::Boolean, &operands[0], used_elements, elements, address_space)?; + let v = value_as( + object_id, + VariantTypeId::Boolean, + &operands[0], + used_elements, + elements, + address_space, + )?; let result = if let Variant::Boolean(v) = v { (!v).into() } else { @@ -447,29 +715,62 @@ pub(crate) fn not(object_id: &NodeId, operands: &[Operand], used_elements: &mut } // TRUE if operand[0] is greater or equal to operand[1] and less than or equal to operand[2]. -pub(crate) fn between(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn between( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // 0, 1, 2 are ordered values // Element 0 must be greater or equal than element 1 - let result = match compare_operands(object_id, &operands[0], &operands[1], used_elements, elements, address_space)? { + let result = match compare_operands( + object_id, + &operands[0], + &operands[1], + used_elements, + elements, + address_space, + )? { ComparisonResult::GreaterThan | ComparisonResult::Equals => { // Element must be less than or equal to element 2 - match compare_operands(object_id, &operands[0], &operands[2], used_elements, elements, address_space)? { + match compare_operands( + object_id, + &operands[0], + &operands[2], + used_elements, + elements, + address_space, + )? 
{ ComparisonResult::LessThan | ComparisonResult::Equals => true, - _ => false + _ => false, } } - _ => false + _ => false, }; Ok(result.into()) } // TRUE if operand[0] is equal to one or more of the remaining operands -pub(crate) fn in_list(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn in_list( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // TRUE if operand[0] is equal to one or more of the remaining operands. // The Equals Operator is evaluated for operand[0] and each remaining operand in the list. // If any Equals evaluation is TRUE, InList returns TRUE. let found = operands[1..].iter().any(|o| { - if let Ok(result) = compare_operands(object_id, &operands[0], o, used_elements, elements, address_space) { + if let Ok(result) = compare_operands( + object_id, + &operands[0], + o, + used_elements, + elements, + address_space, + ) { result == ComparisonResult::Equals } else { false @@ -479,13 +780,33 @@ pub(crate) fn in_list(object_id: &NodeId, operands: &[Operand], used_elements: & } // TRUE if operand[0] and operand[1] are TRUE. -pub(crate) fn and(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn and( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // The following restrictions apply to the operands: // [0]: Any operand that resolves to a Boolean. // [1]: Any operand that resolves to a Boolean. // If any operand cannot be resolved to a Boolean it is considered a NULL. - let v1 = value_as(object_id, VariantTypeId::Boolean, &operands[0], used_elements, elements, address_space)?; - let v2 = value_as(object_id, VariantTypeId::Boolean, &operands[1], used_elements, elements, address_space)?; + let v1 = value_as( + object_id, + VariantTypeId::Boolean, + &operands[0], + used_elements, + elements, + address_space, + )?; + let v2 = value_as( + object_id, + VariantTypeId::Boolean, + &operands[1], + used_elements, + elements, + address_space, + )?; // Derived from Table 120 Logical AND Truth Table let result = if v1 == Variant::Boolean(true) && v2 == Variant::Boolean(true) { @@ -499,13 +820,33 @@ pub(crate) fn and(object_id: &NodeId, operands: &[Operand], used_elements: &mut } // TRUE if operand[0] or operand[1] are TRUE. -pub(crate) fn or(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn or( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // The following restrictions apply to the operands: // [0]: Any operand that resolves to a Boolean. // [1]: Any operand that resolves to a Boolean. // If any operand cannot be resolved to a Boolean it is considered a NULL. 
- let v1 = value_as(object_id, VariantTypeId::Boolean, &operands[0], used_elements, elements, address_space)?; - let v2 = value_as(object_id, VariantTypeId::Boolean, &operands[1], used_elements, elements, address_space)?; + let v1 = value_as( + object_id, + VariantTypeId::Boolean, + &operands[0], + used_elements, + elements, + address_space, + )?; + let v2 = value_as( + object_id, + VariantTypeId::Boolean, + &operands[1], + used_elements, + elements, + address_space, + )?; // Derived from Table 121 Logical OR Truth Table. let result = if v1 == Variant::Boolean(true) || v2 == Variant::Boolean(true) { @@ -520,15 +861,33 @@ pub(crate) fn or(object_id: &NodeId, operands: &[Operand], used_elements: &mut H } // Converts operand[0] to a value with a data type with a NodeId identified by operand[1]. -pub(crate) fn cast(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { +pub(crate) fn cast( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { // Explicitly casts operand 0 to a value with the data type with a node if identified in node 1 // [0] Any operand // [1] Any operand that resolves to a NodeId or ExpandedNodeId where the node is of type DataType // // In case of error evaluates to NULL. - let v1 = value_of(object_id, &operands[0], used_elements, elements, address_space)?; - let v2 = value_of(object_id, &operands[1], used_elements, elements, address_space)?; + let v1 = value_of( + object_id, + &operands[0], + used_elements, + elements, + address_space, + )?; + let v2 = value_of( + object_id, + &operands[1], + used_elements, + elements, + address_space, + )?; // Cast v1 using the datatype in v2 let result = match v2 { @@ -546,7 +905,7 @@ pub(crate) fn cast(object_id: &NodeId, operands: &[Operand], used_elements: &mut Variant::Empty } } - _ => Variant::Empty + _ => Variant::Empty, }; Ok(result) } @@ -558,27 +917,44 @@ enum BitOperation { } macro_rules! 
bitwise_operation { - ( $v1: expr, $v2: expr, $op: expr, $variant_type: ident ) => { - { - if let Variant::$variant_type(v1) = $v1 { - if let Variant::$variant_type(v2) = $v2 { - match $op { - BitOperation::And => (v1 & v2).into(), - BitOperation::Or => (v1 | v2).into() - } - } else { - panic!(); + ( $v1: expr, $v2: expr, $op: expr, $variant_type: ident ) => {{ + if let Variant::$variant_type(v1) = $v1 { + if let Variant::$variant_type(v2) = $v2 { + match $op { + BitOperation::And => (v1 & v2).into(), + BitOperation::Or => (v1 | v2).into(), } } else { panic!(); } + } else { + panic!(); } - } + }}; } -fn bitwise_operation(object_id: &NodeId, operation: BitOperation, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - let v1 = value_of(object_id, &operands[0], used_elements, elements, address_space)?; - let v2 = value_of(object_id, &operands[1], used_elements, elements, address_space)?; +fn bitwise_operation( + object_id: &NodeId, + operation: BitOperation, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + let v1 = value_of( + object_id, + &operands[0], + used_elements, + elements, + address_space, + )?; + let v2 = value_of( + object_id, + &operands[1], + used_elements, + elements, + address_space, + )?; // Try and convert one value or the other to the same type let (v1, v2) = convert(v1, v2); let result = match v1.type_id() { @@ -590,7 +966,7 @@ fn bitwise_operation(object_id: &NodeId, operation: BitOperation, operands: &[Op VariantTypeId::UInt16 => bitwise_operation!(v1, v2, operation, UInt16), VariantTypeId::UInt32 => bitwise_operation!(v1, v2, operation, UInt32), VariantTypeId::UInt64 => bitwise_operation!(v1, v2, operation, UInt64), - _ => Variant::Empty + _ => Variant::Empty, }; Ok(result) } @@ -598,13 +974,39 @@ fn bitwise_operation(object_id: &NodeId, operation: BitOperation, operands: &[Op // The result is an integer which matches the size of the largest operand and contains a bitwise // And operation of the two operands where both have been converted to the same size (largest of // the two operands). -pub(crate) fn bitwise_and(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - bitwise_operation(object_id, BitOperation::And, operands, used_elements, elements, address_space) +pub(crate) fn bitwise_and( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + bitwise_operation( + object_id, + BitOperation::And, + operands, + used_elements, + elements, + address_space, + ) } // The result is an integer which matches the size of the largest operand and contains a bitwise Or // operation of the two operands where both have been converted to the same size (largest of the // two operands). 
-pub(crate) fn bitwise_or(object_id: &NodeId, operands: &[Operand], used_elements: &mut HashSet, elements: &[ContentFilterElement], address_space: &AddressSpace) -> Result { - bitwise_operation(object_id, BitOperation::Or, operands, used_elements, elements, address_space) +pub(crate) fn bitwise_or( + object_id: &NodeId, + operands: &[Operand], + used_elements: &mut HashSet, + elements: &[ContentFilterElement], + address_space: &AddressSpace, +) -> Result { + bitwise_operation( + object_id, + BitOperation::Or, + operands, + used_elements, + elements, + address_space, + ) } diff --git a/server/src/historical/mod.rs b/server/src/historical/mod.rs index 7da5551b4..c6b233fee 100644 --- a/server/src/historical/mod.rs +++ b/server/src/historical/mod.rs @@ -7,8 +7,8 @@ use std::{ sync::{Arc, RwLock}, }; -use opcua_types::*; use opcua_types::status_code::StatusCode; +use opcua_types::*; use crate::address_space::AddressSpace; @@ -40,17 +40,32 @@ pub struct HistoryServerCapabilities { /// to spawn a thread for that activity. Updates and deletes should be spawned on separate threads /// if they are lengthy operations. pub trait HistoricalEventProvider { - fn read_event_details(&self, _address_space: Arc>, _request: ReadEventDetails, _timestamps_to_return: TimestampsToReturn, _release_continuation_points: bool, _nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + fn read_event_details( + &self, + _address_space: Arc>, + _request: ReadEventDetails, + _timestamps_to_return: TimestampsToReturn, + _release_continuation_points: bool, + _nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { info!("Unimplemented read_event_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn update_event_details(&self, _address_space: Arc>, _request: UpdateEventDetails) -> Result, StatusCode> { + fn update_event_details( + &self, + _address_space: Arc>, + _request: UpdateEventDetails, + ) -> Result, StatusCode> { info!("Unimplemented update_event_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn delete_event_details(&self, _address_space: Arc>, _request: DeleteEventDetails) -> Result, StatusCode> { + fn delete_event_details( + &self, + _address_space: Arc>, + _request: DeleteEventDetails, + ) -> Result, StatusCode> { info!("Unimplemented delete_event_details"); Err(StatusCode::BadHistoryOperationUnsupported) } @@ -72,37 +87,74 @@ pub enum HistoryRawData { pub trait HistoricalDataProvider { /// Note: Function returns an `HistoryRawData` enum containing *either* a `HistoryData` for a read raw action /// or a `HistoryModifiedData` for a read modified action. 
- fn read_raw_modified_details(&self, _address_space: Arc>, _request: ReadRawModifiedDetails, _timestamps_to_return: TimestampsToReturn, _release_continuation_points: bool, _nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + fn read_raw_modified_details( + &self, + _address_space: Arc>, + _request: ReadRawModifiedDetails, + _timestamps_to_return: TimestampsToReturn, + _release_continuation_points: bool, + _nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { info!("Unimplemented read_raw_modified_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn read_processed_details(&self, _address_space: Arc>, _request: ReadProcessedDetails, _timestamps_to_return: TimestampsToReturn, _release_continuation_points: bool, _nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + fn read_processed_details( + &self, + _address_space: Arc>, + _request: ReadProcessedDetails, + _timestamps_to_return: TimestampsToReturn, + _release_continuation_points: bool, + _nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { info!("Unimplemented read_processed_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn read_at_time_details(&self, _address_space: Arc>, _request: ReadAtTimeDetails, _timestamps_to_return: TimestampsToReturn, _release_continuation_points: bool, _nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + fn read_at_time_details( + &self, + _address_space: Arc>, + _request: ReadAtTimeDetails, + _timestamps_to_return: TimestampsToReturn, + _release_continuation_points: bool, + _nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { info!("Unimplemented read_at_time_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn update_data_details(&self, _address_space: Arc>, _request: UpdateDataDetails) -> Result, StatusCode> { + fn update_data_details( + &self, + _address_space: Arc>, + _request: UpdateDataDetails, + ) -> Result, StatusCode> { info!("Unimplemented update_data_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn update_structure_data_details(&self, _address_space: Arc>, _request: UpdateStructureDataDetails) -> Result, StatusCode> { + fn update_structure_data_details( + &self, + _address_space: Arc>, + _request: UpdateStructureDataDetails, + ) -> Result, StatusCode> { info!("Unimplemented update_structure_data_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn delete_raw_modified_details(&self, _address_space: Arc>, _request: DeleteRawModifiedDetails) -> Result, StatusCode> { + fn delete_raw_modified_details( + &self, + _address_space: Arc>, + _request: DeleteRawModifiedDetails, + ) -> Result, StatusCode> { info!("Unimplemented delete_raw_modified_details"); Err(StatusCode::BadHistoryOperationUnsupported) } - fn delete_at_time_details(&self, _address_space: Arc>, _request: DeleteAtTimeDetails) -> Result, StatusCode> { + fn delete_at_time_details( + &self, + _address_space: Arc>, + _request: DeleteAtTimeDetails, + ) -> Result, StatusCode> { info!("Unimplemented delete_at_time_details"); Err(StatusCode::BadHistoryOperationUnsupported) } diff --git a/server/src/http/mod.rs b/server/src/http/mod.rs index 2b4d95180..2a3814ce1 100644 --- a/server/src/http/mod.rs +++ b/server/src/http/mod.rs @@ -3,26 +3,18 @@ // Copyright (C) 2017-2020 Adam Lock use std::{ - thread, path::PathBuf, - sync::{ - Arc, RwLock, - mpsc, - }, + path::PathBuf, + sync::{mpsc, Arc, RwLock}, + thread, }; -use futures::{Poll, Async}; use futures::future::Future; +use futures::{Async, Poll}; -use 
actix_web::{ - actix, http, server, App, Responder, HttpRequest, HttpResponse, fs, -}; +use actix_web::{actix, fs, http, server, App, HttpRequest, HttpResponse, Responder}; use serde_json; -use crate::{ - server::Connections, - metrics::ServerMetrics, - state::ServerState, -}; +use crate::{metrics::ServerMetrics, server::Connections, state::ServerState}; /// This is our metrics service, the thing called to handle requests coming from hyper #[derive(Clone)] @@ -38,9 +30,7 @@ fn abort(req: &HttpRequest) -> impl Responder { // Abort the server from the command let mut server_state = state.server_state.write().unwrap(); server_state.abort(); - HttpResponse::Ok() - .content_type("text/plain") - .body("OK") + HttpResponse::Ok().content_type("text/plain").body("OK") } else { // Abort is only enabled in debug mode HttpResponse::Ok() @@ -80,7 +70,7 @@ fn metrics(req: &HttpRequest) -> impl Responder { } struct HttpQuit { - server_state: Arc> + server_state: Arc>, } impl Future for HttpQuit { @@ -101,31 +91,48 @@ impl Future for HttpQuit { } /// Runs an http server on the specified binding address, serving out the supplied server metrics -pub fn run_http_server(address: &str, content_path: &str, server_state: Arc>, connections: Arc>, server_metrics: Arc>) { +pub fn run_http_server( + address: &str, + content_path: &str, + server_state: Arc>, + connections: Arc>, + server_metrics: Arc>, +) { let address = String::from(address); let base_path = PathBuf::from(content_path); - let quit_task = HttpQuit { server_state: server_state.clone() }; + let quit_task = HttpQuit { + server_state: server_state.clone(), + }; let (tx, rx) = mpsc::channel(); thread::spawn(move || { - info!("HTTP server is running on http://{}/ to provide OPC UA server metrics", address); + info!( + "HTTP server is running on http://{}/ to provide OPC UA server metrics", + address + ); let sys = actix::System::new("http-server"); - let addr = server::new( - move || { - App::with_state(HttpState { - server_state: server_state.clone(), - connections: connections.clone(), - server_metrics: server_metrics.clone(), - }) - .resource("/server/metrics", |r| r.method(http::Method::GET).f(metrics)) - .resource("/server/abort", |r| r.method(http::Method::GET).f(abort)) - .handler("/", fs::StaticFiles::new(base_path.clone()).unwrap() - .index_file("index.html")) + let addr = server::new(move || { + App::with_state(HttpState { + server_state: server_state.clone(), + connections: connections.clone(), + server_metrics: server_metrics.clone(), + }) + .resource("/server/metrics", |r| { + r.method(http::Method::GET).f(metrics) }) - .bind(&address).unwrap() - .start(); + .resource("/server/abort", |r| r.method(http::Method::GET).f(abort)) + .handler( + "/", + fs::StaticFiles::new(base_path.clone()) + .unwrap() + .index_file("index.html"), + ) + }) + .bind(&address) + .unwrap() + .start(); // Give the address info to the quit task let _ = tx.send(addr); @@ -141,9 +148,7 @@ pub fn run_http_server(address: &str, content_path: &str, server_state: Arc { - IdentityToken::Invalid(o.clone()) - } + _ => IdentityToken::Invalid(o.clone()), } } else { IdentityToken::Invalid(o.clone()) } } -} \ No newline at end of file +} diff --git a/server/src/lib.rs b/server/src/lib.rs index 9fcd1c299..a8648bd9e 100644 --- a/server/src/lib.rs +++ b/server/src/lib.rs @@ -17,10 +17,10 @@ //! # Example //! //! This is a very simple server which runs with the default address space on the default port. -//! +//! //! ```no_run //! use opcua_server::prelude::*; -//! +//! //! 
fn main() { //! let server: Server = ServerBuilder::new_sample().server().unwrap(); //! server.run(); @@ -47,7 +47,7 @@ extern crate opcua_core; macro_rules! is_empty_option_vec { ( $v: expr ) => { $v.is_none() || $v.as_ref().unwrap().is_empty() - } + }; } /// Matches macro taken from matches crate @@ -60,8 +60,8 @@ macro_rules! matches { } } -mod services; mod identity_token; +mod services; #[cfg(feature = "discovery-server-registration")] mod discovery; @@ -91,11 +91,6 @@ pub mod util; pub mod prelude { //! Provides a way to use most types and functions commonly used by server implementations from a //! single use statement. - pub use opcua_types::status_code::StatusCode; - pub use opcua_types::*; - pub use opcua_types::service_types::*; - pub use opcua_core::prelude::*; - pub use opcua_crypto::*; pub use crate::{ address_space::types::*, address_space::{AccessLevel, EventNotifier, UserAccessLevel}, @@ -108,6 +103,11 @@ pub mod prelude { subscriptions::*, util::*, }; + pub use opcua_core::prelude::*; + pub use opcua_crypto::*; + pub use opcua_types::service_types::*; + pub use opcua_types::status_code::StatusCode; + pub use opcua_types::*; } pub mod constants { diff --git a/server/src/metrics.rs b/server/src/metrics.rs index 7d656cb08..a5de8549a 100644 --- a/server/src/metrics.rs +++ b/server/src/metrics.rs @@ -72,12 +72,15 @@ impl ServerMetrics { }; // For security, blank out user tokens config.user_tokens.clear(); - config.user_tokens.insert(String::new(), config::ServerUserToken { - user: String::from("User identity tokens have been removed"), - pass: None, - x509: None, - thumbprint: None, - }); + config.user_tokens.insert( + String::new(), + config::ServerUserToken { + user: String::from("User identity tokens have been removed"), + pass: None, + x509: None, + thumbprint: None, + }, + ); self.config = Some(config.clone()); } @@ -94,54 +97,74 @@ impl ServerMetrics { self.diagnostics = diagnostics.clone(); } - let elapsed = now.as_chrono().signed_duration_since(start_time.as_chrono()); + let elapsed = now + .as_chrono() + .signed_duration_since(start_time.as_chrono()); self.server.uptime_ms = elapsed.num_milliseconds(); } // Update the connection metrics which includes susbcriptions and monitored items pub fn update_from_connections(&mut self, connections: server::Connections) { self.runtime_components = runtime_components!(); - self.connections = connections.iter().map(|c| { - // Carefully extract info while minimizing chance of deadlock - let (client_address, transport_state, session) = { - let connection = trace_read_lock_unwrap!(c); - let client_address = if let Some(ref client_address) = connection.client_address() { - format!("{:?}", client_address) - } else { - String::new() - }; - let transport_state = match connection.state() { - TransportState::New => "New".to_string(), - TransportState::WaitingHello => "WaitingHello".to_string(), - TransportState::ProcessMessages => "ProcessMessages".to_string(), - TransportState::Finished(status_code) => format!("Finished({})", status_code) + self.connections = connections + .iter() + .map(|c| { + // Carefully extract info while minimizing chance of deadlock + let (client_address, transport_state, session) = { + let connection = trace_read_lock_unwrap!(c); + let client_address = + if let Some(ref client_address) = connection.client_address() { + format!("{:?}", client_address) + } else { + String::new() + }; + let transport_state = match connection.state() { + TransportState::New => "New".to_string(), + TransportState::WaitingHello => 
"WaitingHello".to_string(), + TransportState::ProcessMessages => "ProcessMessages".to_string(), + TransportState::Finished(status_code) => { + format!("Finished({})", status_code) + } + }; + (client_address, transport_state, connection.session()) }; - (client_address, transport_state, connection.session()) - }; - let (id, session_activated, session_terminated, session_terminated_at, subscriptions) = { - let session = trace_read_lock_unwrap!(session); - let id = session.session_id().to_string(); - let session_activated = session.is_activated(); - let session_terminated = session.is_terminated(); - let session_terminated_at = if session.is_terminated() { - session.terminated_at().to_rfc3339() - } else { - String::new() + let ( + id, + session_activated, + session_terminated, + session_terminated_at, + subscriptions, + ) = { + let session = trace_read_lock_unwrap!(session); + let id = session.session_id().to_string(); + let session_activated = session.is_activated(); + let session_terminated = session.is_terminated(); + let session_terminated_at = if session.is_terminated() { + session.terminated_at().to_rfc3339() + } else { + String::new() + }; + let subscriptions = session.subscriptions().metrics(); + ( + id, + session_activated, + session_terminated, + session_terminated_at, + subscriptions, + ) }; - let subscriptions = session.subscriptions().metrics(); - (id, session_activated, session_terminated, session_terminated_at, subscriptions) - }; - // session.subscriptions.iterate ... - Connection { - id, - client_address, - transport_state, - session_activated, - session_terminated, - session_terminated_at, - subscriptions, - } - }).collect(); + // session.subscriptions.iterate ... + Connection { + id, + client_address, + transport_state, + session_activated, + session_terminated, + session_terminated_at, + subscriptions, + } + }) + .collect(); } } diff --git a/server/src/server.rs b/server/src/server.rs index 50725f8c6..791b78335 100644 --- a/server/src/server.rs +++ b/server/src/server.rs @@ -11,15 +11,18 @@ use std::{ time::{Duration, Instant}, }; -use futures::{Future, future, Stream, sync::mpsc::{unbounded, UnboundedSender}}; -use tokio::{self, net::{TcpListener, TcpStream}}; +use futures::{ + future, + sync::mpsc::{unbounded, UnboundedSender}, + Future, Stream, +}; +use tokio::{ + self, + net::{TcpListener, TcpStream}, +}; use tokio_timer::Interval; -use opcua_core::{ - completion_pact, - config::Config, - prelude::*, -}; +use opcua_core::{completion_pact, config::Config, prelude::*}; use opcua_crypto::*; use opcua_types::service_types::ServerState as ServerStateType; @@ -29,8 +32,8 @@ use crate::{ comms::transport::Transport, config::ServerConfig, constants, - events::audit::AuditLog, diagnostics::ServerDiagnostics, + events::audit::AuditLog, metrics::ServerMetrics, services::message_handler::MessageHandler, session::Session, @@ -105,7 +108,10 @@ impl Server { let product_uri = UAString::from(&config.product_uri); let start_time = DateTime::now(); let servers = vec![config.application_uri.clone()]; - let base_endpoint = format!("opc.tcp://{}:{}", config.tcp_config.host, config.tcp_config.port); + let base_endpoint = format!( + "opc.tcp://{}:{}", + config.tcp_config.host, config.tcp_config.port + ); let max_subscriptions = config.limits.max_subscriptions as usize; let max_monitored_items_per_sub = config.limits.max_monitored_items_per_sub as usize; let diagnostics = Arc::new(RwLock::new(ServerDiagnostics::default())); @@ -115,9 +121,18 @@ impl Server { // TODO max string, byte string and 
array lengths // Security, pki auto create cert - let application_description = if config.create_sample_keypair { Some(config.application_description()) } else { None }; - let (mut certificate_store, server_certificate, server_pkey) = CertificateStore::new_with_keypair( - &config.pki_dir, config.certificate_path.as_deref(), config.private_key_path.as_deref(), application_description); + let application_description = if config.create_sample_keypair { + Some(config.application_description()) + } else { + None + }; + let (mut certificate_store, server_certificate, server_pkey) = + CertificateStore::new_with_keypair( + &config.pki_dir, + config.certificate_path.as_deref(), + config.private_key_path.as_deref(), + application_description, + ); if server_certificate.is_none() || server_pkey.is_none() { error!("Server is missing its application instance certificate and/or its private key. Encrypted endpoints will not function correctly.") } @@ -168,7 +183,7 @@ impl Server { unregister_nodes_callback: None, historical_data_provider: None, historical_event_provider: None, - operational_limits: OperationalLimits::default() + operational_limits: OperationalLimits::default(), }; let server_state = Arc::new(RwLock::new(server_state)); @@ -222,15 +237,16 @@ impl Server { let config = trace_read_lock_unwrap!(server_state.config); // Discovery url must be present and valid - let discovery_server_url = if let Some(ref discovery_server_url) = config.discovery_server_url { - if is_valid_opc_ua_url(discovery_server_url) { - Some(discovery_server_url.clone()) + let discovery_server_url = + if let Some(ref discovery_server_url) = config.discovery_server_url { + if is_valid_opc_ua_url(discovery_server_url) { + Some(discovery_server_url.clone()) + } else { + None + } } else { None - } - } else { - None - }; + }; (sock_addr, discovery_server_url) }; @@ -378,7 +394,11 @@ impl Server { info!("Base url: {}", server_state.base_endpoint); info!("Supported endpoints:"); for (id, endpoint) in &config.endpoints { - let users: Vec = endpoint.user_token_ids.iter().map(|id| id.clone()).collect(); + let users: Vec = endpoint + .user_token_ids + .iter() + .map(|id| id.clone()) + .collect(); let users = users.join(", "); info!("Endpoint \"{}\": {}", id, endpoint.path); info!(" Security Mode: {}", endpoint.security_mode); @@ -457,7 +477,10 @@ impl Server { use std::sync::Mutex; let discovery_server_url = discovery_server_url.to_string(); - info!("Server has set a discovery server url {} which will be used to register the server", discovery_server_url); + info!( + "Server has set a discovery server url {} which will be used to register the server", + discovery_server_url + ); let server_state = self.server_state.clone(); let server_state_for_take = self.server_state.clone(); @@ -494,7 +517,10 @@ impl Server { let _ = std::panic::catch_unwind(move || { let server_state = trace_read_lock_unwrap!(server_state); if server_state.is_running() { - discovery::register_with_discovery_server(&discovery_server_url, &server_state); + discovery::register_with_discovery_server( + &discovery_server_url, + &server_state, + ); } }); }); @@ -518,14 +544,17 @@ impl Server { /// implementation will move any variables into the function that are required to perform its /// action. 
pub fn add_polling_action(&mut self, interval_ms: u64, action: F) - where F: Fn() + Send + Sync + 'static { + where + F: Fn() + Send + Sync + 'static, + { // If the server is not yet running, the action is queued and is started later let server_state = trace_read_lock_unwrap!(self.server_state); if server_state.is_abort() { error!("Polling action added when server is aborting"); - // DO NOTHING + // DO NOTHING } else if !server_state.is_running() { - self.pending_polling_actions.push((interval_ms, Box::new(action))); + self.pending_polling_actions + .push((interval_ms, Box::new(action))); } else { // Start the action immediately let _ = PollingAction::spawn(self.server_state.clone(), interval_ms, move || { @@ -541,7 +570,10 @@ impl Server { self.pending_polling_actions .drain(..) .for_each(|(interval_ms, action)| { - debug!("Starting a pending polling action at rate of {} ms", interval_ms); + debug!( + "Starting a pending polling action at rate of {} ms", + interval_ms + ); let _ = PollingAction::spawn(server_state.clone(), interval_ms, move || { // Call the provided action action(); @@ -551,14 +583,22 @@ impl Server { /// Create a new transport. pub fn new_transport(&self) -> TcpTransport { - let session = { - Arc::new(RwLock::new(Session::new(self))) - }; + let session = { Arc::new(RwLock::new(Session::new(self))) }; // TODO session should be stored in a sessions list so that disconnected sessions can be // reestablished if necessary let address_space = self.address_space.clone(); - let message_handler = MessageHandler::new(self.certificate_store.clone(), self.server_state.clone(), session.clone(), address_space.clone()); - TcpTransport::new(self.server_state.clone(), session, address_space, message_handler) + let message_handler = MessageHandler::new( + self.certificate_store.clone(), + self.server_state.clone(), + session.clone(), + address_space.clone(), + ); + TcpTransport::new( + self.server_state.clone(), + session, + address_space, + message_handler, + ) } /// Handles the incoming request @@ -576,7 +616,10 @@ impl Server { let looping_interval_ms = { let server_state = trace_read_lock_unwrap!(self.server_state); // Get the minimum interval in ms - f64::min(server_state.min_publishing_interval_ms, server_state.min_sampling_interval_ms) + f64::min( + server_state.min_publishing_interval_ms, + server_state.min_sampling_interval_ms, + ) }; // Run adds a session task to the tokio session diff --git a/server/src/services/attribute.rs b/server/src/services/attribute.rs index ee2aae9fb..9ae9f8b65 100644 --- a/server/src/services/attribute.rs +++ b/server/src/services/attribute.rs @@ -8,16 +8,20 @@ use std::{ }; use opcua_core::supported_message::SupportedMessage; -use opcua_types::*; use opcua_types::status_code::StatusCode; +use opcua_types::*; +use crate::address_space::types::NodeBase; use crate::{ - address_space::{AddressSpace, node::{HasNodeId, NodeType}, UserAccessLevel, variable::Variable}, + address_space::{ + node::{HasNodeId, NodeType}, + variable::Variable, + AddressSpace, UserAccessLevel, + }, services::Service, session::Session, state::ServerState, }; -use crate::address_space::types::NodeBase; enum ReadDetails { ReadEventDetails(ReadEventDetails), @@ -39,7 +43,9 @@ enum UpdateDetails { pub(crate) struct AttributeService {} impl Service for AttributeService { - fn name(&self) -> String { String::from("AttributeService") } + fn name(&self) -> String { + String::from("AttributeService") + } } impl AttributeService { @@ -53,7 +59,13 @@ impl AttributeService { /// elements or to 
read ranges of elements of the composite. Servers may make historical /// values available to Clients using this Service, although the historical values themselves /// are not visible in the AddressSpace. - pub fn read(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &ReadRequest) -> SupportedMessage { + pub fn read( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &ReadRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.nodes_to_read) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else if request.max_age < 0f64 { @@ -62,7 +74,10 @@ impl AttributeService { self.service_fault(&request.request_header, StatusCode::BadMaxAgeInvalid) } else if request.timestamps_to_return == TimestampsToReturn::Invalid { warn!("ReadRequest invalid timestamps to return"); - self.service_fault(&request.request_header, StatusCode::BadTimestampsToReturnInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadTimestampsToReturnInvalid, + ) } else { let server_state = trace_read_lock_unwrap!(server_state); let nodes_to_read = request.nodes_to_read.as_ref().unwrap(); @@ -71,9 +86,18 @@ impl AttributeService { let session = trace_read_lock_unwrap!(session); let address_space = trace_read_lock_unwrap!(address_space); let timestamps_to_return = request.timestamps_to_return; - let results = nodes_to_read.iter().map(|node_to_read| { - Self::read_node_value(&session, &address_space, node_to_read, request.max_age, timestamps_to_return) - }).collect(); + let results = nodes_to_read + .iter() + .map(|node_to_read| { + Self::read_node_value( + &session, + &address_space, + node_to_read, + request.max_age, + timestamps_to_return, + ) + }) + .collect(); let diagnostic_infos = None; let response = ReadResponse { @@ -90,7 +114,13 @@ impl AttributeService { } /// Used to read historical values - pub fn history_read(&self, server_state: Arc>, _session: Arc>, address_space: Arc>, request: &HistoryReadRequest) -> SupportedMessage { + pub fn history_read( + &self, + server_state: Arc>, + _session: Arc>, + address_space: Arc>, + request: &HistoryReadRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.nodes_to_read) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -98,7 +128,12 @@ impl AttributeService { let server_state = trace_read_lock_unwrap!(server_state); server_state.decoding_limits() }; - match Self::do_history_read_details(&decoding_limits, server_state, address_space, request) { + match Self::do_history_read_details( + &decoding_limits, + server_state, + address_space, + request, + ) { Ok(results) => { let diagnostic_infos = None; let response = HistoryReadResponse { @@ -108,9 +143,7 @@ impl AttributeService { }; response.into() } - Err(status_code) => { - self.service_fault(&request.request_header, status_code) - } + Err(status_code) => self.service_fault(&request.request_header, status_code), } } } @@ -119,7 +152,13 @@ impl AttributeService { /// constructed Attribute values whose elements are indexed, such as an array, this Service /// allows Clients to write the entire set of indexed values as a composite, to write individual /// elements or to write ranges of elements of the composite. 
- pub fn write(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &WriteRequest) -> SupportedMessage { + pub fn write( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &WriteRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.nodes_to_write) { debug!("Empty list passed to write {:?}", request); self.service_fault(&request.request_header, StatusCode::BadNothingToDo) @@ -131,25 +170,38 @@ impl AttributeService { let nodes_to_write = request.nodes_to_write.as_ref().unwrap(); if nodes_to_write.len() <= server_state.operational_limits.max_nodes_per_write { - let results = nodes_to_write.iter().map(|node_to_write| { - Self::write_node_value(&session, &mut address_space, node_to_write) - }).collect(); + let results = nodes_to_write + .iter() + .map(|node_to_write| { + Self::write_node_value(&session, &mut address_space, node_to_write) + }) + .collect(); let diagnostic_infos = None; WriteResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), diagnostic_infos, - }.into() + } + .into() } else { - warn!("WriteRequest too many nodes to write {}", nodes_to_write.len()); + warn!( + "WriteRequest too many nodes to write {}", + nodes_to_write.len() + ); self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } } } /// Used to update or update historical values - pub fn history_update(&self, server_state: Arc>, _session: Arc>, address_space: Arc>, request: &HistoryUpdateRequest) -> SupportedMessage { + pub fn history_update( + &self, + server_state: Arc>, + _session: Arc>, + address_space: Arc>, + request: &HistoryUpdateRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.history_update_details) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -159,77 +211,146 @@ impl AttributeService { server_state.decoding_limits() }; let history_update_details = request.history_update_details.as_ref().unwrap(); - let results = history_update_details.iter().map(|u| { - // Decode the update/delete action - let (status_code, operation_results) = Self::do_history_update_details(&decoding_limits, server_state.clone(), address_space.clone(), u); - HistoryUpdateResult { - status_code, - operation_results, - diagnostic_infos: None, - } - }).collect(); + let results = history_update_details + .iter() + .map(|u| { + // Decode the update/delete action + let (status_code, operation_results) = Self::do_history_update_details( + &decoding_limits, + server_state.clone(), + address_space.clone(), + u, + ); + HistoryUpdateResult { + status_code, + operation_results, + diagnostic_infos: None, + } + }) + .collect(); HistoryUpdateResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), diagnostic_infos: None, - }.into() + } + .into() } } fn node_id_to_action(node_id: &NodeId, actions: &[ObjectId]) -> Result { let object_id = node_id.as_object_id()?; - actions.iter().find(|v| object_id == **v) + actions + .iter() + .find(|v| object_id == **v) .map(|v| *v) .ok_or(()) } fn node_id_to_historical_read_action(node_id: &NodeId) -> Result { - Self::node_id_to_action(node_id, &[ - ObjectId::ReadEventDetails_Encoding_DefaultBinary, - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, - ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, - ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary - ]) + Self::node_id_to_action( + node_id, + &[ + ObjectId::ReadEventDetails_Encoding_DefaultBinary, + 
ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, + ObjectId::ReadProcessedDetails_Encoding_DefaultBinary, + ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary, + ], + ) } fn node_id_to_historical_update_action(node_id: &NodeId) -> Result { - Self::node_id_to_action(node_id, &[ - ObjectId::UpdateDataDetails_Encoding_DefaultBinary, - ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, - ObjectId::UpdateEventDetails_Encoding_DefaultBinary, - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, - ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, - ObjectId::DeleteEventDetails_Encoding_DefaultBinary - ]) + Self::node_id_to_action( + node_id, + &[ + ObjectId::UpdateDataDetails_Encoding_DefaultBinary, + ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary, + ObjectId::UpdateEventDetails_Encoding_DefaultBinary, + ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, + ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary, + ObjectId::DeleteEventDetails_Encoding_DefaultBinary, + ], + ) } - fn decode_history_read_details(history_read_details: &ExtensionObject, decoding_limits: &DecodingLimits) -> Result { + fn decode_history_read_details( + history_read_details: &ExtensionObject, + decoding_limits: &DecodingLimits, + ) -> Result { let action = Self::node_id_to_historical_read_action(&history_read_details.node_id) .map_err(|_| StatusCode::BadHistoryOperationInvalid)?; match action { - ObjectId::ReadEventDetails_Encoding_DefaultBinary => Ok(ReadDetails::ReadEventDetails(history_read_details.decode_inner::(&decoding_limits)?)), - ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary => Ok(ReadDetails::ReadRawModifiedDetails(history_read_details.decode_inner::(&decoding_limits)?)), - ObjectId::ReadProcessedDetails_Encoding_DefaultBinary => Ok(ReadDetails::ReadProcessedDetails(history_read_details.decode_inner::(&decoding_limits)?)), - ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary => Ok(ReadDetails::ReadAtTimeDetails(history_read_details.decode_inner::(&decoding_limits)?)), - _ => panic!() + ObjectId::ReadEventDetails_Encoding_DefaultBinary => Ok(ReadDetails::ReadEventDetails( + history_read_details.decode_inner::(&decoding_limits)?, + )), + ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary => { + Ok(ReadDetails::ReadRawModifiedDetails( + history_read_details + .decode_inner::(&decoding_limits)?, + )) + } + ObjectId::ReadProcessedDetails_Encoding_DefaultBinary => { + Ok(ReadDetails::ReadProcessedDetails( + history_read_details.decode_inner::(&decoding_limits)?, + )) + } + ObjectId::ReadAtTimeDetails_Encoding_DefaultBinary => { + Ok(ReadDetails::ReadAtTimeDetails( + history_read_details.decode_inner::(&decoding_limits)?, + )) + } + _ => panic!(), } } - fn decode_history_update_details(history_update_details: &ExtensionObject, decoding_limits: &DecodingLimits) -> Result { + fn decode_history_update_details( + history_update_details: &ExtensionObject, + decoding_limits: &DecodingLimits, + ) -> Result { let action = Self::node_id_to_historical_update_action(&history_update_details.node_id) .map_err(|_| StatusCode::BadHistoryOperationInvalid)?; match action { - ObjectId::UpdateDataDetails_Encoding_DefaultBinary => Ok(UpdateDetails::UpdateDataDetails(history_update_details.decode_inner::(&decoding_limits)?)), - ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary => Ok(UpdateDetails::UpdateStructureDataDetails(history_update_details.decode_inner::(&decoding_limits)?)), - ObjectId::UpdateEventDetails_Encoding_DefaultBinary => 
Ok(UpdateDetails::UpdateEventDetails(history_update_details.decode_inner::(&decoding_limits)?)), - ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary => Ok(UpdateDetails::DeleteRawModifiedDetails(history_update_details.decode_inner::(&decoding_limits)?)), - ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary => Ok(UpdateDetails::DeleteAtTimeDetails(history_update_details.decode_inner::(&decoding_limits)?)), - ObjectId::DeleteEventDetails_Encoding_DefaultBinary => Ok(UpdateDetails::DeleteEventDetails(history_update_details.decode_inner::(&decoding_limits)?)), - _ => panic!() + ObjectId::UpdateDataDetails_Encoding_DefaultBinary => { + Ok(UpdateDetails::UpdateDataDetails( + history_update_details.decode_inner::(&decoding_limits)?, + )) + } + ObjectId::UpdateStructureDataDetails_Encoding_DefaultBinary => { + Ok(UpdateDetails::UpdateStructureDataDetails( + history_update_details + .decode_inner::(&decoding_limits)?, + )) + } + ObjectId::UpdateEventDetails_Encoding_DefaultBinary => { + Ok(UpdateDetails::UpdateEventDetails( + history_update_details.decode_inner::(&decoding_limits)?, + )) + } + ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary => { + Ok(UpdateDetails::DeleteRawModifiedDetails( + history_update_details + .decode_inner::(&decoding_limits)?, + )) + } + ObjectId::DeleteAtTimeDetails_Encoding_DefaultBinary => { + Ok(UpdateDetails::DeleteAtTimeDetails( + history_update_details.decode_inner::(&decoding_limits)?, + )) + } + ObjectId::DeleteEventDetails_Encoding_DefaultBinary => { + Ok(UpdateDetails::DeleteEventDetails( + history_update_details.decode_inner::(&decoding_limits)?, + )) + } + _ => panic!(), } } - fn do_history_update_details(decoding_limits: &DecodingLimits, server_state: Arc>, address_space: Arc>, u: &ExtensionObject) -> (StatusCode, Option>) { + fn do_history_update_details( + decoding_limits: &DecodingLimits, + server_state: Arc>, + address_space: Arc>, + u: &ExtensionObject, + ) -> (StatusCode, Option>) { match Self::decode_history_update_details(u, &decoding_limits) { Ok(details) => { let server_state = trace_read_lock_unwrap!(server_state); @@ -237,42 +358,56 @@ impl AttributeService { // Call the provider (data or event) let result = match details { UpdateDetails::UpdateDataDetails(details) => { - if let Some(ref historical_data_provider) = server_state.historical_data_provider.as_ref() { + if let Some(ref historical_data_provider) = + server_state.historical_data_provider.as_ref() + { historical_data_provider.update_data_details(address_space, details) } else { Err(StatusCode::BadHistoryOperationUnsupported) } } UpdateDetails::UpdateStructureDataDetails(details) => { - if let Some(ref historical_data_provider) = server_state.historical_data_provider.as_ref() { - historical_data_provider.update_structure_data_details(address_space, details) + if let Some(ref historical_data_provider) = + server_state.historical_data_provider.as_ref() + { + historical_data_provider + .update_structure_data_details(address_space, details) } else { Err(StatusCode::BadHistoryOperationUnsupported) } } UpdateDetails::UpdateEventDetails(details) => { - if let Some(ref historical_event_provider) = server_state.historical_event_provider.as_ref() { + if let Some(ref historical_event_provider) = + server_state.historical_event_provider.as_ref() + { historical_event_provider.update_event_details(address_space, details) } else { Err(StatusCode::BadHistoryOperationUnsupported) } } UpdateDetails::DeleteRawModifiedDetails(details) => { - if let Some(ref historical_data_provider) = 
server_state.historical_data_provider.as_ref() { - historical_data_provider.delete_raw_modified_details(address_space, details) + if let Some(ref historical_data_provider) = + server_state.historical_data_provider.as_ref() + { + historical_data_provider + .delete_raw_modified_details(address_space, details) } else { Err(StatusCode::BadHistoryOperationUnsupported) } } UpdateDetails::DeleteAtTimeDetails(details) => { - if let Some(ref historical_data_provider) = server_state.historical_data_provider.as_ref() { + if let Some(ref historical_data_provider) = + server_state.historical_data_provider.as_ref() + { historical_data_provider.delete_at_time_details(address_space, details) } else { Err(StatusCode::BadHistoryOperationUnsupported) } } UpdateDetails::DeleteEventDetails(details) => { - if let Some(ref historical_event_provider) = server_state.historical_event_provider.as_ref() { + if let Some(ref historical_event_provider) = + server_state.historical_event_provider.as_ref() + { historical_event_provider.delete_event_details(address_space, details) } else { Err(StatusCode::BadHistoryOperationUnsupported) @@ -281,39 +416,81 @@ impl AttributeService { }; match result { Ok(operation_results) => (StatusCode::Good, Some(operation_results)), - Err(status_code) => (status_code, None) + Err(status_code) => (status_code, None), } } - Err(status_code) => (status_code, None) + Err(status_code) => (status_code, None), } } - fn do_history_read_details(decoding_limits: &DecodingLimits, server_state: Arc>, address_space: Arc>, request: &HistoryReadRequest) -> Result, StatusCode> { + fn do_history_read_details( + decoding_limits: &DecodingLimits, + server_state: Arc>, + address_space: Arc>, + request: &HistoryReadRequest, + ) -> Result, StatusCode> { // TODO enforce operation limits // Validate the action being performed let nodes_to_read = &request.nodes_to_read.as_ref().unwrap(); let timestamps_to_return = request.timestamps_to_return; let release_continuation_points = request.release_continuation_points; - let read_details = Self::decode_history_read_details(&request.history_read_details, &decoding_limits)?; + let read_details = + Self::decode_history_read_details(&request.history_read_details, &decoding_limits)?; let server_state = trace_read_lock_unwrap!(server_state); let results = match read_details { ReadDetails::ReadEventDetails(details) => { - let historical_event_provider = server_state.historical_event_provider.as_ref().ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_event_provider.read_event_details(address_space, details, timestamps_to_return, release_continuation_points, &nodes_to_read)? + let historical_event_provider = server_state + .historical_event_provider + .as_ref() + .ok_or(StatusCode::BadHistoryOperationUnsupported)?; + historical_event_provider.read_event_details( + address_space, + details, + timestamps_to_return, + release_continuation_points, + &nodes_to_read, + )? } ReadDetails::ReadRawModifiedDetails(details) => { - let historical_data_provider = server_state.historical_data_provider.as_ref().ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_data_provider.read_raw_modified_details(address_space, details, timestamps_to_return, release_continuation_points, &nodes_to_read)? 
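// Sketch of the optional-provider pattern running through
// do_history_update_details above: the server state holds Option-wrapped
// history providers, and every history operation falls back to
// BadHistoryOperationUnsupported when no provider is registered. The trait and
// structs below are simplified stand-ins for the crate's provider API.

#[derive(Debug, PartialEq)]
enum StatusCode {
    BadHistoryOperationUnsupported,
}

trait HistoricalDataProvider {
    fn delete_at_time_details(&self, node_id: &str) -> Result<Vec<StatusCode>, StatusCode>;
}

struct ServerState {
    historical_data_provider: Option<Box<dyn HistoricalDataProvider>>,
}

fn delete_at_time(state: &ServerState, node_id: &str) -> Result<Vec<StatusCode>, StatusCode> {
    if let Some(provider) = state.historical_data_provider.as_ref() {
        provider.delete_at_time_details(node_id)
    } else {
        Err(StatusCode::BadHistoryOperationUnsupported)
    }
}

fn main() {
    // No provider registered, so the operation is reported as unsupported.
    let state = ServerState { historical_data_provider: None };
    assert_eq!(
        delete_at_time(&state, "ns=2;s=History"),
        Err(StatusCode::BadHistoryOperationUnsupported)
    );
}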
+ let historical_data_provider = server_state + .historical_data_provider + .as_ref() + .ok_or(StatusCode::BadHistoryOperationUnsupported)?; + historical_data_provider.read_raw_modified_details( + address_space, + details, + timestamps_to_return, + release_continuation_points, + &nodes_to_read, + )? } ReadDetails::ReadProcessedDetails(details) => { - let historical_data_provider = server_state.historical_data_provider.as_ref().ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_data_provider.read_processed_details(address_space, details, timestamps_to_return, release_continuation_points, &nodes_to_read)? + let historical_data_provider = server_state + .historical_data_provider + .as_ref() + .ok_or(StatusCode::BadHistoryOperationUnsupported)?; + historical_data_provider.read_processed_details( + address_space, + details, + timestamps_to_return, + release_continuation_points, + &nodes_to_read, + )? } ReadDetails::ReadAtTimeDetails(details) => { - let historical_data_provider = server_state.historical_data_provider.as_ref().ok_or(StatusCode::BadHistoryOperationUnsupported)?; - historical_data_provider.read_at_time_details(address_space, details, timestamps_to_return, release_continuation_points, &nodes_to_read)? + let historical_data_provider = server_state + .historical_data_provider + .as_ref() + .ok_or(StatusCode::BadHistoryOperationUnsupported)?; + historical_data_provider.read_at_time_details( + address_space, + details, + timestamps_to_return, + release_continuation_points, + &nodes_to_read, + )? } }; Ok(results) @@ -327,14 +504,24 @@ impl AttributeService { } } - fn read_node_value(session: &Session, address_space: &AddressSpace, node_to_read: &ReadValueId, max_age: f64, timestamps_to_return: TimestampsToReturn) -> DataValue { + fn read_node_value( + session: &Session, + address_space: &AddressSpace, + node_to_read: &ReadValueId, + max_age: f64, + timestamps_to_return: TimestampsToReturn, + ) -> DataValue { // Node node found // debug!("read_node_value asked to read node id {}, attribute {}", node_to_read.node_id, node_to_read.attribute_id); let mut result_value = DataValue::null(); if let Some(node) = address_space.find_node(&node_to_read.node_id) { if let Ok(attribute_id) = AttributeId::from_u32(node_to_read.attribute_id) { - let index_range = match node_to_read.index_range.as_ref().parse::() - .map_err(|_| StatusCode::BadIndexRangeInvalid) { + let index_range = match node_to_read + .index_range + .as_ref() + .parse::() + .map_err(|_| StatusCode::BadIndexRangeInvalid) + { Ok(index_range) => index_range, Err(err) => { return DataValue { @@ -350,17 +537,29 @@ impl AttributeService { if !Self::is_readable(session, &node, attribute_id) { // Can't read this node - debug!("read_node_value result for read node id {}, attribute {} is unreadable", node_to_read.node_id, node_to_read.attribute_id); + debug!( + "read_node_value result for read node id {}, attribute {} is unreadable", + node_to_read.node_id, node_to_read.attribute_id + ); result_value.status = Some(StatusCode::BadNotReadable); } else if attribute_id != AttributeId::Value && index_range != NumericRange::None { // Can't supply an index range on a non-Value attribute - debug!("read_node_value result for read node id {}, attribute {} is invalid range", node_to_read.node_id, node_to_read.attribute_id); + debug!( + "read_node_value result for read node id {}, attribute {} is invalid range", + node_to_read.node_id, node_to_read.attribute_id + ); result_value.status = Some(StatusCode::BadIndexRangeNoData); } else if 
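// Sketch of the index-range handling that the read_node_value hunk above relies
// on: the ReadValueId carries the range as a string, the service parses it and
// any parse failure becomes BadIndexRangeInvalid in the result DataValue. This
// NumericRange is a deliberately reduced stand-in that only accepts "", "n" and
// "n:m"; the real parser supports more forms.

use std::str::FromStr;

#[derive(Debug, PartialEq)]
enum NumericRange {
    None,
    Index(u32),
    Range(u32, u32),
}

impl FromStr for NumericRange {
    type Err = ();

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s.is_empty() {
            Ok(NumericRange::None)
        } else if let Some((lo, hi)) = s.split_once(':') {
            let lo = lo.parse().map_err(|_| ())?;
            let hi = hi.parse().map_err(|_| ())?;
            if lo < hi {
                Ok(NumericRange::Range(lo, hi))
            } else {
                Err(())
            }
        } else {
            Ok(NumericRange::Index(s.parse().map_err(|_| ())?))
        }
    }
}

#[derive(Debug, PartialEq)]
enum StatusCode {
    BadIndexRangeInvalid,
}

fn parse_index_range(s: &str) -> Result<NumericRange, StatusCode> {
    s.parse::<NumericRange>()
        .map_err(|_| StatusCode::BadIndexRangeInvalid)
}

fn main() {
    assert_eq!(parse_index_range(""), Ok(NumericRange::None));
    assert_eq!(parse_index_range("3"), Ok(NumericRange::Index(3)));
    assert_eq!(parse_index_range("2:5"), Ok(NumericRange::Range(2, 5)));
    assert_eq!(parse_index_range("5:2"), Err(StatusCode::BadIndexRangeInvalid));
}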
!Self::is_supported_data_encoding(&node_to_read.data_encoding) { // Caller must request binary debug!("read_node_value result for read node id {}, attribute {} is invalid data encoding", node_to_read.node_id, node_to_read.attribute_id); result_value.status = Some(StatusCode::BadDataEncodingInvalid); - } else if let Some(attribute) = node.as_node().get_attribute_max_age(timestamps_to_return, attribute_id, index_range, &node_to_read.data_encoding, max_age) { + } else if let Some(attribute) = node.as_node().get_attribute_max_age( + timestamps_to_return, + attribute_id, + index_range, + &node_to_read.data_encoding, + max_age, + ) { // If caller was reading the user access level, this needs to be modified to // take account of the effective level based on who is logged in. let value = if attribute_id == AttributeId::UserAccessLevel { @@ -368,7 +567,11 @@ impl AttributeService { if let Variant::Byte(value) = value { // The bits from the node are further modified by the session let user_access_level = UserAccessLevel::from_bits_truncate(value); - let user_access_level = session.effective_user_access_level(user_access_level, &node.node_id(), attribute_id); + let user_access_level = session.effective_user_access_level( + user_access_level, + &node.node_id(), + attribute_id, + ); Some(Variant::from(user_access_level.bits())) } else { Some(value) @@ -415,21 +618,34 @@ impl AttributeService { } } } else { - debug!("read_node_value result for read node id {}, attribute {} is invalid/1", node_to_read.node_id, node_to_read.attribute_id); + debug!( + "read_node_value result for read node id {}, attribute {} is invalid/1", + node_to_read.node_id, node_to_read.attribute_id + ); result_value.status = Some(StatusCode::BadAttributeIdInvalid); } } else { - debug!("read_node_value result for read node id {}, attribute {} is invalid/2", node_to_read.node_id, node_to_read.attribute_id); + debug!( + "read_node_value result for read node id {}, attribute {} is invalid/2", + node_to_read.node_id, node_to_read.attribute_id + ); result_value.status = Some(StatusCode::BadAttributeIdInvalid); } } else { - debug!("read_node_value result for read node id {}, attribute {} cannot find node", node_to_read.node_id, node_to_read.attribute_id); + debug!( + "read_node_value result for read node id {}, attribute {} cannot find node", + node_to_read.node_id, node_to_read.attribute_id + ); result_value.status = Some(StatusCode::BadNodeIdUnknown); } result_value } - fn user_access_level(session: &Session, node: &NodeType, attribute_id: AttributeId) -> UserAccessLevel { + fn user_access_level( + session: &Session, + node: &NodeType, + attribute_id: AttributeId, + ) -> UserAccessLevel { let user_access_level = if let NodeType::Variable(ref node) = node { node.user_access_level() } else { @@ -449,16 +665,19 @@ impl AttributeService { // For a variable, the access level controls access to the variable if let NodeType::Variable(_) = node { if attribute_id == AttributeId::Value { - return Self::user_access_level(session, node, attribute_id).contains(UserAccessLevel::CURRENT_WRITE); + return Self::user_access_level(session, node, attribute_id) + .contains(UserAccessLevel::CURRENT_WRITE); } } if let Some(write_mask) = node.as_node().write_mask() { match attribute_id { - AttributeId::Value => if let NodeType::VariableType(_) = node { - write_mask.contains(WriteMask::VALUE_FOR_VARIABLE_TYPE) - } else { - false + AttributeId::Value => { + if let NodeType::VariableType(_) = node { + write_mask.contains(WriteMask::VALUE_FOR_VARIABLE_TYPE) + } 
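// Sketch of the UserAccessLevel adjustment in read_node_value above: the byte
// read from the node is masked against what the logged-in user is allowed to
// do, so the client only ever sees its effective rights. Plain bit constants
// stand in for the bitflags-based UserAccessLevel type in the real crate.

const CURRENT_READ: u8 = 0b0000_0001;
const CURRENT_WRITE: u8 = 0b0000_0010;

struct Session {
    // Upper bound on what this user may do, regardless of the node's own bits.
    granted_access: u8,
}

impl Session {
    fn effective_user_access_level(&self, node_access_level: u8) -> u8 {
        node_access_level & self.granted_access
    }
}

fn main() {
    let node_access_level = CURRENT_READ | CURRENT_WRITE;
    let session = Session { granted_access: CURRENT_READ };
    let effective = session.effective_user_access_level(node_access_level);
    assert_eq!(effective, CURRENT_READ); // the write bit is removed for this user
}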
else { + false + } } AttributeId::NodeId => write_mask.contains(WriteMask::NODE_ID), AttributeId::NodeClass => write_mask.contains(WriteMask::NODE_CLASS), @@ -477,15 +696,21 @@ impl AttributeService { AttributeId::ArrayDimensions => write_mask.contains(WriteMask::ARRAY_DIMENSIONS), AttributeId::AccessLevel => write_mask.contains(WriteMask::ACCESS_LEVEL), AttributeId::UserAccessLevel => write_mask.contains(WriteMask::USER_ACCESS_LEVEL), - AttributeId::MinimumSamplingInterval => write_mask.contains(WriteMask::MINIMUM_SAMPLING_INTERVAL), + AttributeId::MinimumSamplingInterval => { + write_mask.contains(WriteMask::MINIMUM_SAMPLING_INTERVAL) + } AttributeId::Historizing => write_mask.contains(WriteMask::HISTORIZING), AttributeId::Executable => write_mask.contains(WriteMask::EXECUTABLE), AttributeId::UserExecutable => write_mask.contains(WriteMask::USER_EXECUTABLE), - AttributeId::DataTypeDefinition => write_mask.contains(WriteMask::DATA_TYPE_DEFINITION), + AttributeId::DataTypeDefinition => { + write_mask.contains(WriteMask::DATA_TYPE_DEFINITION) + } AttributeId::RolePermissions => write_mask.contains(WriteMask::ROLE_PERMISSIONS), - AttributeId::AccessRestrictions => write_mask.contains(WriteMask::ACCESS_RESTRICTIONS), + AttributeId::AccessRestrictions => { + write_mask.contains(WriteMask::ACCESS_RESTRICTIONS) + } AttributeId::AccessLevelEx => write_mask.contains(WriteMask::ACCESS_LEVEL_EX), - AttributeId::UserRolePermissions => false // Reserved + AttributeId::UserRolePermissions => false, // Reserved } } else { false @@ -503,8 +728,11 @@ impl AttributeService { */ /// Determine if the value is writable to a Variable node's data type - fn validate_value_to_write(address_space: &AddressSpace, variable: &Variable, value: &Variant) -> bool { - + fn validate_value_to_write( + address_space: &AddressSpace, + variable: &Variable, + value: &Variant, + ) -> bool { // Get the value rank and data type of the variable let value_rank = variable.value_rank(); let node_data_type = variable.data_type(); @@ -524,13 +752,13 @@ impl AttributeService { if node_data_type == DataTypeId::Byte.into() { match value_rank { -2 | -3 | 1 => true, - _ => false + _ => false, } } else { false } } - _ => data_type_matches + _ => data_type_matches, } } else { true @@ -548,21 +776,26 @@ impl AttributeService { valid } - fn write_node_value(session: &Session, address_space: &mut AddressSpace, node_to_write: &WriteValue) -> StatusCode { + fn write_node_value( + session: &Session, + address_space: &mut AddressSpace, + node_to_write: &WriteValue, + ) -> StatusCode { if let Some(node) = address_space.find_node(&node_to_write.node_id) { if let Ok(attribute_id) = AttributeId::from_u32(node_to_write.attribute_id) { let index_range = node_to_write.index_range.as_ref().parse::(); if !Self::is_writable(session, &node, attribute_id) { StatusCode::BadNotWritable - } else if attribute_id != AttributeId::Value && !node_to_write.index_range.is_null() { + } else if attribute_id != AttributeId::Value && !node_to_write.index_range.is_null() + { // Index ranges are not supported on anything other than a value attribute error!("Server does not support indexes for attributes other than Value"); StatusCode::BadWriteNotSupported -// else if node_to_write.value.server_timestamp.is_some() || node_to_write.value.server_picoseconds.is_some() || -// node_to_write.value.source_timestamp.is_some() || node_to_write.value.source_picoseconds.is_some() { -// error!("Server does not support timestamps in write"); -// StatusCode::BadWriteNotSupported + // else if 
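// Sketch of the WriteMask check reformatted in is_writable above: every
// non-Value attribute is writable only when the node's write mask carries the
// matching bit, and UserRolePermissions stays reserved. Plain constants and a
// reduced AttributeId enum stand in for the bitflags type; the bit positions
// here are illustrative, not the normative WriteMask layout.

const WRITE_MASK_BROWSE_NAME: u32 = 1 << 2;
const WRITE_MASK_DESCRIPTION: u32 = 1 << 5;
const WRITE_MASK_DISPLAY_NAME: u32 = 1 << 6;

enum AttributeId {
    BrowseName,
    DisplayName,
    Description,
    UserRolePermissions,
}

fn is_attribute_writable(write_mask: u32, attribute_id: AttributeId) -> bool {
    match attribute_id {
        AttributeId::BrowseName => write_mask & WRITE_MASK_BROWSE_NAME != 0,
        AttributeId::DisplayName => write_mask & WRITE_MASK_DISPLAY_NAME != 0,
        AttributeId::Description => write_mask & WRITE_MASK_DESCRIPTION != 0,
        // Reserved, exactly as in the service implementation above.
        AttributeId::UserRolePermissions => false,
    }
}

fn main() {
    let write_mask = WRITE_MASK_DISPLAY_NAME | WRITE_MASK_BROWSE_NAME;
    assert!(is_attribute_writable(write_mask, AttributeId::DisplayName));
    assert!(!is_attribute_writable(write_mask, AttributeId::Description));
    assert!(!is_attribute_writable(write_mask, AttributeId::UserRolePermissions));
}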
node_to_write.value.server_timestamp.is_some() || node_to_write.value.server_picoseconds.is_some() || + // node_to_write.value.source_timestamp.is_some() || node_to_write.value.source_picoseconds.is_some() { + // error!("Server does not support timestamps in write"); + // StatusCode::BadWriteNotSupported } else if index_range.is_err() { error!("Index range is invalid"); StatusCode::BadIndexRangeInvalid @@ -578,7 +811,7 @@ impl AttributeService { NodeType::Variable(ref variable) => { Self::validate_value_to_write(address_space, variable, value) } - _ => true // Other types don't have this attr but they will reject later during set + _ => true, // Other types don't have this attr but they will reject later during set } } else { true @@ -590,14 +823,16 @@ impl AttributeService { let node = address_space.find_node_mut(&node_to_write.node_id).unwrap(); let result = if attribute_id == AttributeId::Value { match node { - NodeType::Variable(ref mut variable) => { - variable.set_value(index_range, value.clone()) - .map_err(|err| { - error!("Value could not be set to node {} Value, error = {}", node_to_write.node_id, err); - err - }) - } - _ => Err(StatusCode::BadAttributeIdInvalid) + NodeType::Variable(ref mut variable) => variable + .set_value(index_range, value.clone()) + .map_err(|err| { + error!( + "Value could not be set to node {} Value, error = {}", + node_to_write.node_id, err + ); + err + }), + _ => Err(StatusCode::BadAttributeIdInvalid), } } else { let node = node.as_mut_node(); @@ -607,7 +842,11 @@ impl AttributeService { err }) }; - if result.is_err() { result.unwrap_err() } else { StatusCode::Good } + if result.is_err() { + result.unwrap_err() + } else { + StatusCode::Good + } } } else { error!("Server does not support missing value in write"); diff --git a/server/src/services/audit.rs b/server/src/services/audit.rs index 5601a40cc..fbe5de476 100644 --- a/server/src/services/audit.rs +++ b/server/src/services/audit.rs @@ -4,16 +4,11 @@ use std::sync::{Arc, RwLock}; -use opcua_types::{ - *, status_code::StatusCode, -}; +use opcua_types::{status_code::StatusCode, *}; use crate::{ address_space::address_space::AddressSpace, - events::audit::{ - certificate_events::*, - session_events::*, - }, + events::audit::{certificate_events::*, session_events::*}, session::Session, state::ServerState, }; @@ -26,7 +21,14 @@ fn next_node_id(address_space: Arc>) -> NodeId { NodeId::next_numeric(audit_namespace) } -pub fn log_create_session(server_state: &ServerState, session: &Session, address_space: Arc>, status: bool, revised_session_timeout: Duration, request: &CreateSessionRequest) { +pub fn log_create_session( + server_state: &ServerState, + session: &Session, + address_space: Arc>, + status: bool, + revised_session_timeout: Duration, + request: &CreateSessionRequest, +) { let node_id = next_node_id(address_space); let now = DateTime::now(); @@ -57,7 +59,13 @@ pub fn log_create_session(server_state: &ServerState, session: &Session, address let _ = server_state.raise_and_log(event); } -pub fn log_activate_session(server_state: &ServerState, session: &Session, address_space: Arc>, status: bool, request: &ActivateSessionRequest) { +pub fn log_activate_session( + server_state: &ServerState, + session: &Session, + address_space: Arc>, + status: bool, + request: &ActivateSessionRequest, +) { let node_id = next_node_id(address_space); let now = DateTime::now(); @@ -71,11 +79,12 @@ pub fn log_activate_session(server_state: &ServerState, session: &Session, addre let event = if status { // Client software 
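// Sketch of the builder-style audit events used by the audit.rs hunks above:
// each log_* helper constructs an event, chains the optional fields and hands
// the finished event to the server to raise. The struct below is a simplified
// stand-in for AuditSessionEventType, not the real builder.

#[derive(Debug)]
struct AuditSessionEvent {
    reason: String,
    status: bool,
    client_user_id: Option<String>,
    session_id: Option<String>,
}

impl AuditSessionEvent {
    fn new_close_session(reason: &str) -> Self {
        Self {
            reason: reason.to_string(),
            status: false,
            client_user_id: None,
            session_id: None,
        }
    }

    fn status(mut self, status: bool) -> Self {
        self.status = status;
        self
    }

    fn client_user_id(mut self, id: &str) -> Self {
        self.client_user_id = Some(id.to_string());
        self
    }

    fn session_id(mut self, id: &str) -> Self {
        self.session_id = Some(id.to_string());
        self
    }
}

fn main() {
    let event = AuditSessionEvent::new_close_session("CloseSession")
        .status(true)
        .client_user_id("anonymous")
        .session_id("ns=1;i=1000");
    println!("audit event raised: {:?}", event);
}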
certificates - let event = if let Some(ref client_software_certificates) = request.client_software_certificates { - event.client_software_certificates(client_software_certificates.clone()) - } else { - event - }; + let event = + if let Some(ref client_software_certificates) = request.client_software_certificates { + event.client_software_certificates(client_software_certificates.clone()) + } else { + event + }; // TODO user identity token - should we serialize the entire token in an audit log, or just the policy uri? // from a security perspective, logging credentials is bad. @@ -88,21 +97,36 @@ pub fn log_activate_session(server_state: &ServerState, session: &Session, addre let _ = server_state.raise_and_log(event); } -pub fn log_close_session(server_state: &ServerState, session: &Session, address_space: Arc>, status: bool, request: &CloseSessionRequest) { +pub fn log_close_session( + server_state: &ServerState, + session: &Session, + address_space: Arc>, + status: bool, + request: &CloseSessionRequest, +) { let node_id = next_node_id(address_space); let now = DateTime::now(); let session_id = session.session_id().clone(); - let event = AuditSessionEventType::new_close_session(node_id, now, AuditCloseSessionReason::CloseSession) - .status(status) - .client_user_id(session.client_user_id()) - .client_audit_entry_id(request.request_header.audit_entry_id.clone()) - .session_id(session_id); + let event = AuditSessionEventType::new_close_session( + node_id, + now, + AuditCloseSessionReason::CloseSession, + ) + .status(status) + .client_user_id(session.client_user_id()) + .client_audit_entry_id(request.request_header.audit_entry_id.clone()) + .session_id(session_id); let _ = server_state.raise_and_log(event); } -pub fn log_certificate_error(server_state: &ServerState, address_space: Arc>, status_code: StatusCode, request_header: &RequestHeader) { +pub fn log_certificate_error( + server_state: &ServerState, + address_space: Arc>, + status_code: StatusCode, + request_header: &RequestHeader, +) { let node_id = next_node_id(address_space); let now = DateTime::now(); diff --git a/server/src/services/discovery.rs b/server/src/services/discovery.rs index e22aaecc6..62afccdf8 100644 --- a/server/src/services/discovery.rs +++ b/server/src/services/discovery.rs @@ -5,16 +5,18 @@ use std::sync::{Arc, RwLock}; use opcua_core::supported_message::SupportedMessage; -use opcua_types::{*, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, *}; -use crate::{services::Service, state::ServerState}; use crate::prelude::Config; +use crate::{services::Service, state::ServerState}; /// The discovery service. Allows a server to return the endpoints that it supports. 
pub(crate) struct DiscoveryService; impl Service for DiscoveryService { - fn name(&self) -> String { String::from("DiscoveryService") } + fn name(&self) -> String { + String::from("DiscoveryService") + } } impl DiscoveryService { @@ -22,7 +24,11 @@ impl DiscoveryService { DiscoveryService {} } - pub fn get_endpoints(&self, server_state: Arc>, request: &GetEndpointsRequest) -> SupportedMessage { + pub fn get_endpoints( + &self, + server_state: Arc>, + request: &GetEndpointsRequest, + ) -> SupportedMessage { let server_state = trace_read_lock_unwrap!(server_state); // TODO some of the arguments in the request are ignored @@ -34,22 +40,34 @@ impl DiscoveryService { GetEndpointsResponse { response_header: ResponseHeader::new_good(&request.request_header), endpoints, - }.into() + } + .into() } - pub fn register_server(&self, _server_state: Arc>, request: &RegisterServerRequest) -> SupportedMessage { + pub fn register_server( + &self, + _server_state: Arc>, + request: &RegisterServerRequest, + ) -> SupportedMessage { self.service_fault(&request.request_header, StatusCode::BadNotSupported) } - pub fn register_server2(&self, _server_state: Arc>, request: &RegisterServer2Request) -> SupportedMessage { + pub fn register_server2( + &self, + _server_state: Arc>, + request: &RegisterServer2Request, + ) -> SupportedMessage { self.service_fault(&request.request_header, StatusCode::BadNotSupported) } - pub fn find_servers(&self, server_state: Arc>, request: &FindServersRequest) -> SupportedMessage { - + pub fn find_servers( + &self, + server_state: Arc>, + request: &FindServersRequest, + ) -> SupportedMessage { let server_state = trace_read_lock_unwrap!(server_state); - let application_description= { + let application_description = { let config = trace_read_lock_unwrap!(server_state.config); config.application_description() }; @@ -65,6 +83,7 @@ impl DiscoveryService { FindServersResponse { response_header: ResponseHeader::new_good(&request.request_header), servers, - }.into() + } + .into() } } diff --git a/server/src/services/message_handler.rs b/server/src/services/message_handler.rs index a182b8c96..e526fc77c 100644 --- a/server/src/services/message_handler.rs +++ b/server/src/services/message_handler.rs @@ -8,20 +8,15 @@ use chrono::Utc; use opcua_core::supported_message::SupportedMessage; use opcua_crypto::{CertificateStore, SecurityPolicy}; -use opcua_types::{*, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, *}; use crate::{ address_space::AddressSpace, comms::tcp_transport::MessageSender, services::{ - attribute::AttributeService, - discovery::DiscoveryService, - method::MethodService, - monitored_item::MonitoredItemService, - node_management::NodeManagementService, - query::QueryService, - session::SessionService, - subscription::SubscriptionService, + attribute::AttributeService, discovery::DiscoveryService, method::MethodService, + monitored_item::MonitoredItemService, node_management::NodeManagementService, + query::QueryService, session::SessionService, subscription::SubscriptionService, view::ViewService, }, session::Session, @@ -60,7 +55,12 @@ pub struct MessageHandler { } impl MessageHandler { - pub fn new(certificate_store: Arc>, server_state: Arc>, session: Arc>, address_space: Arc>) -> MessageHandler { + pub fn new( + certificate_store: Arc>, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + ) -> MessageHandler { MessageHandler { certificate_store, server_state, @@ -78,7 +78,12 @@ impl MessageHandler { } } - pub fn handle_message(&mut self, request_id: 
u32, message: &SupportedMessage, sender: &MessageSender) -> Result<(), StatusCode> { + pub fn handle_message( + &mut self, + request_id: u32, + message: &SupportedMessage, + sender: &MessageSender, + ) -> Result<(), StatusCode> { // Note the order of arguments for all these services is the order that they must be locked in, // // 1. ServerState @@ -90,207 +95,412 @@ impl MessageHandler { let address_space = self.address_space.clone(); let response = match message { - // Discovery Service Set, OPC UA Part 4, Section 5.4 SupportedMessage::GetEndpointsRequest(request) => { Some(self.discovery_service.get_endpoints(server_state, request)) } - SupportedMessage::RegisterServerRequest(request) => { - Some(self.discovery_service.register_server(server_state, request)) - } + SupportedMessage::RegisterServerRequest(request) => Some( + self.discovery_service + .register_server(server_state, request), + ), - SupportedMessage::RegisterServer2Request(request) => { - Some(self.discovery_service.register_server2(server_state, request)) - } + SupportedMessage::RegisterServer2Request(request) => Some( + self.discovery_service + .register_server2(server_state, request), + ), SupportedMessage::FindServersRequest(request) => { Some(self.discovery_service.find_servers(server_state, request)) } // Session Service Set, OPC UA Part 4, Section 5.6 - SupportedMessage::CreateSessionRequest(request) => { let certificate_store = trace_read_lock_unwrap!(self.certificate_store); - Some(self.session_service.create_session(&certificate_store, server_state, session, address_space, request)) - } - SupportedMessage::CloseSessionRequest(request) => { - Some(self.session_service.close_session(server_state, session, address_space, request)) - } + Some(self.session_service.create_session( + &certificate_store, + server_state, + session, + address_space, + request, + )) + } + SupportedMessage::CloseSessionRequest(request) => Some( + self.session_service + .close_session(server_state, session, address_space, request), + ), // NOTE - ALL THE REQUESTS BEYOND THIS POINT MUST BE VALIDATED AGAINST THE SESSION - SupportedMessage::ActivateSessionRequest(request) => { Self::validate_service_request(message, session.clone(), "", move || { - self.session_service.activate_session(server_state, session, address_space, request) + self.session_service.activate_session( + server_state, + session, + address_space, + request, + ) }) } // NOTE - ALL THE REQUESTS BEYOND THIS POINT MUST BE VALIDATED AGAINST THE SESSION AND // HAVE AN ACTIVE SESSION - SupportedMessage::CancelRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), "", move || { - self.session_service.cancel(server_state, session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + "", + move || self.session_service.cancel(server_state, session, request), + ) } // NodeManagement Service Set, OPC UA Part 4, Section 5.7 - SupportedMessage::AddNodesRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), ADD_NODES_COUNT, move || { - self.node_management_service.add_nodes(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + ADD_NODES_COUNT, + move || { + self.node_management_service.add_nodes( + server_state, + session, + address_space, + request, + ) + }, + ) } SupportedMessage::AddReferencesRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), 
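// Sketch of the lock-ordering rule called out at the top of handle_message
// above: services take ServerState, then Session, then AddressSpace, and every
// caller acquires the RwLocks in that same order so two concurrent requests
// cannot deadlock. Stand-in types; the real code locks through the
// trace_read_lock_unwrap!/trace_write_lock_unwrap! macros.

use std::sync::{Arc, RwLock};

struct ServerState { running: bool }
struct Session { activated: bool }
struct AddressSpace { node_count: usize }

fn read_service(
    server_state: Arc<RwLock<ServerState>>,
    session: Arc<RwLock<Session>>,
    address_space: Arc<RwLock<AddressSpace>>,
) -> usize {
    // Always lock in the documented order: 1. ServerState, 2. Session, 3. AddressSpace.
    let server_state = server_state.read().unwrap();
    let session = session.read().unwrap();
    let address_space = address_space.read().unwrap();
    if server_state.running && session.activated {
        address_space.node_count
    } else {
        0
    }
}

fn main() {
    let server_state = Arc::new(RwLock::new(ServerState { running: true }));
    let session = Arc::new(RwLock::new(Session { activated: true }));
    let address_space = Arc::new(RwLock::new(AddressSpace { node_count: 42 }));
    assert_eq!(read_service(server_state, session, address_space), 42);
}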
ADD_REFERENCES_COUNT, move || { - self.node_management_service.add_references(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + ADD_REFERENCES_COUNT, + move || { + self.node_management_service.add_references( + server_state, + session, + address_space, + request, + ) + }, + ) } SupportedMessage::DeleteNodesRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), DELETE_NODES_COUNT, move || { - self.node_management_service.delete_nodes(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + DELETE_NODES_COUNT, + move || { + self.node_management_service.delete_nodes( + server_state, + session, + address_space, + request, + ) + }, + ) } SupportedMessage::DeleteReferencesRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), DELETE_REFERENCES_COUNT, move || { - self.node_management_service.delete_references(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + DELETE_REFERENCES_COUNT, + move || { + self.node_management_service.delete_references( + server_state, + session, + address_space, + request, + ) + }, + ) } // View Service Set, OPC UA Part 4, Section 5.8 - SupportedMessage::BrowseRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), BROWSE_COUNT, move || { - self.view_service.browse(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + BROWSE_COUNT, + move || { + self.view_service + .browse(server_state, session, address_space, request) + }, + ) } SupportedMessage::BrowseNextRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), BROWSE_NEXT_COUNT, move || { - self.view_service.browse_next(session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + BROWSE_NEXT_COUNT, + move || { + self.view_service + .browse_next(session, address_space, request) + }, + ) } SupportedMessage::TranslateBrowsePathsToNodeIdsRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), TRANSLATE_BROWSE_PATHS_TO_NODE_IDS_COUNT, move || { - self.view_service.translate_browse_paths_to_node_ids(server_state, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + TRANSLATE_BROWSE_PATHS_TO_NODE_IDS_COUNT, + move || { + self.view_service.translate_browse_paths_to_node_ids( + server_state, + address_space, + request, + ) + }, + ) } SupportedMessage::RegisterNodesRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), REGISTER_NODES_COUNT, move || { - self.view_service.register_nodes(server_state, session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + REGISTER_NODES_COUNT, + move || { + self.view_service + .register_nodes(server_state, session, request) + }, + ) } SupportedMessage::UnregisterNodesRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), UNREGISTER_NODES_COUNT, move || { - self.view_service.unregister_nodes(server_state, session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + UNREGISTER_NODES_COUNT, + move || { + 
self.view_service + .unregister_nodes(server_state, session, request) + }, + ) } // Query Service Set, OPC UA Part 4, Section 5.9 - SupportedMessage::QueryFirstRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), READ_COUNT, move || { - self.query_service.query_first(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + READ_COUNT, + move || { + self.query_service.query_first( + server_state, + session, + address_space, + request, + ) + }, + ) } SupportedMessage::QueryNextRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), READ_COUNT, move || { - self.query_service.query_next(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + READ_COUNT, + move || { + self.query_service + .query_next(server_state, session, address_space, request) + }, + ) } // Attribute Service Set, OPC UA Part 4, Section 5.10 - SupportedMessage::ReadRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), READ_COUNT, move || { - self.attribute_service.read(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + READ_COUNT, + move || { + self.attribute_service + .read(server_state, session, address_space, request) + }, + ) } SupportedMessage::HistoryReadRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), HISTORY_READ_COUNT, move || { - self.attribute_service.history_read(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + HISTORY_READ_COUNT, + move || { + self.attribute_service.history_read( + server_state, + session, + address_space, + request, + ) + }, + ) } SupportedMessage::WriteRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), WRITE_COUNT, move || { - self.attribute_service.write(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + WRITE_COUNT, + move || { + self.attribute_service + .write(server_state, session, address_space, request) + }, + ) } SupportedMessage::HistoryUpdateRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), HISTORY_UPDATE_COUNT, move || { - self.attribute_service.history_update(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + HISTORY_UPDATE_COUNT, + move || { + self.attribute_service.history_update( + server_state, + session, + address_space, + request, + ) + }, + ) } // Method Service Set, OPC UA Part 4, Section 5.11 - SupportedMessage::CallRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), CALL_COUNT, move || { - self.method_service.call(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + CALL_COUNT, + move || { + self.method_service + .call(server_state, session, address_space, request) + }, + ) } // Monitored Item Service Set, OPC UA Part 4, Section 5.12 - SupportedMessage::CreateMonitoredItemsRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), CREATE_MONITORED_ITEMS_COUNT, move || { - 
self.monitored_item_service.create_monitored_items(server_state, session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + CREATE_MONITORED_ITEMS_COUNT, + move || { + self.monitored_item_service.create_monitored_items( + server_state, + session, + address_space, + request, + ) + }, + ) } SupportedMessage::ModifyMonitoredItemsRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), MODIFY_MONITORED_ITEMS_COUNT, move || { - self.monitored_item_service.modify_monitored_items(session, address_space, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + MODIFY_MONITORED_ITEMS_COUNT, + move || { + self.monitored_item_service.modify_monitored_items( + session, + address_space, + request, + ) + }, + ) } SupportedMessage::SetMonitoringModeRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), SET_MONITORING_MODE_COUNT, move || { - self.monitored_item_service.set_monitoring_mode(session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + SET_MONITORING_MODE_COUNT, + move || { + self.monitored_item_service + .set_monitoring_mode(session, request) + }, + ) } SupportedMessage::SetTriggeringRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), SET_TRIGGERING_COUNT, move || { - self.monitored_item_service.set_triggering(session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + SET_TRIGGERING_COUNT, + move || self.monitored_item_service.set_triggering(session, request), + ) } SupportedMessage::DeleteMonitoredItemsRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), DELETE_MONITORED_ITEMS_COUNT, move || { - self.monitored_item_service.delete_monitored_items(session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + DELETE_MONITORED_ITEMS_COUNT, + move || { + self.monitored_item_service + .delete_monitored_items(session, request) + }, + ) } // Subscription Service Set, OPC UA Part 4, Section 5.13 - SupportedMessage::CreateSubscriptionRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), CREATE_SUBSCRIPTION_COUNT, move || { - self.subscription_service.create_subscription(server_state, session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + CREATE_SUBSCRIPTION_COUNT, + move || { + self.subscription_service.create_subscription( + server_state, + session, + request, + ) + }, + ) } SupportedMessage::ModifySubscriptionRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), MODIFY_SUBSCRIPTION_COUNT, move || { - self.subscription_service.modify_subscription(server_state, session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + MODIFY_SUBSCRIPTION_COUNT, + move || { + self.subscription_service.modify_subscription( + server_state, + session, + request, + ) + }, + ) } SupportedMessage::SetPublishingModeRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), SET_PUBLISHING_MODE_COUNT, move || { - self.subscription_service.set_publishing_mode(session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + SET_PUBLISHING_MODE_COUNT, + move || { + self.subscription_service + 
.set_publishing_mode(session, request) + }, + ) } SupportedMessage::DeleteSubscriptionsRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), DELETE_SUBSCRIPTIONS_COUNT, move || { - self.subscription_service.delete_subscriptions(session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + DELETE_SUBSCRIPTIONS_COUNT, + move || { + self.subscription_service + .delete_subscriptions(session, request) + }, + ) } SupportedMessage::TransferSubscriptionsRequest(request) => { - Self::validate_active_session_service_request(message, session.clone(), TRANSFER_SUBSCRIPTIONS_COUNT, move || { - self.subscription_service.transfer_subscriptions(session, request) - }) + Self::validate_active_session_service_request( + message, + session.clone(), + TRANSFER_SUBSCRIPTIONS_COUNT, + move || { + self.subscription_service + .transfer_subscriptions(session, request) + }, + ) } SupportedMessage::PublishRequest(request) => { - if let Err(response) = Self::is_authentication_token_valid(session.clone(), &request.request_header) { + if let Err(response) = + Self::is_authentication_token_valid(session.clone(), &request.request_header) + { Some(response) } else { // TODO publish request diagnostics have to be done asynchronously too @@ -298,19 +508,30 @@ impl MessageHandler { // Unlike other calls which return immediately, this one is asynchronous - the // request is queued and the response will come back out of sequence some time in // the future. - self.subscription_service.async_publish(&Utc::now(), session, address_space, request_id, &request) + self.subscription_service.async_publish( + &Utc::now(), + session, + address_space, + request_id, + &request, + ) } } SupportedMessage::RepublishRequest(request) => { - Self::validate_active_session_service_request(&message, session.clone(), REPUBLISH_COUNT, move || { - self.subscription_service.republish(session, request) - }) + Self::validate_active_session_service_request( + &message, + session.clone(), + REPUBLISH_COUNT, + move || self.subscription_service.republish(session, request), + ) } // Unhandle messages - message => { - debug!("Message handler does not handle this kind of message {:?}", message); + debug!( + "Message handler does not handle this kind of message {:?}", + message + ); return Err(StatusCode::BadServiceUnsupported); } }; @@ -326,7 +547,10 @@ impl MessageHandler { /// /// The request header should contain the session authentication token issued during a /// CreateSession or the request is invalid. An invalid token can cause the session to close. 
- fn is_authentication_token_valid(session: Arc>, request_header: &RequestHeader) -> Result<(), SupportedMessage> { + fn is_authentication_token_valid( + session: Arc>, + request_header: &RequestHeader, + ) -> Result<(), SupportedMessage> { let mut session = trace_write_lock_unwrap!(session); // TODO if session's token is null, it might be possible to retrieve session state from a // previously closed session and reassociate it if the authentication token is recognized @@ -335,10 +559,16 @@ impl MessageHandler { let secure_channel = trace_read_lock_unwrap!(secure_channel); secure_channel.security_policy() != SecurityPolicy::None }; - if is_secure_connection && session.authentication_token() != &request_header.authentication_token { + if is_secure_connection + && session.authentication_token() != &request_header.authentication_token + { // Session should terminate session.terminate_session(); - error!("supplied authentication token {:?} does not match session's expected token {:?}", request_header.authentication_token, session.authentication_token()); + error!( + "supplied authentication token {:?} does not match session's expected token {:?}", + request_header.authentication_token, + session.authentication_token() + ); Err(ServiceFault::new(request_header, StatusCode::BadIdentityTokenRejected).into()) } else { Ok(()) @@ -346,7 +576,11 @@ impl MessageHandler { } /// Tests if this request should be rejected because of a session timeout - fn is_session_timed_out(session: Arc>, request_header: &RequestHeader, now: DateTimeUtc) -> Result<(), SupportedMessage> { + fn is_session_timed_out( + session: Arc>, + request_header: &RequestHeader, + now: DateTimeUtc, + ) -> Result<(), SupportedMessage> { let mut session = trace_write_lock_unwrap!(session); let last_service_request_timestamp = session.last_service_request_timestamp(); let elapsed = now - last_service_request_timestamp; @@ -366,7 +600,10 @@ impl MessageHandler { } /// Test if the session is activated - fn is_session_activated(session: Arc>, request_header: &RequestHeader) -> Result<(), SupportedMessage> { + fn is_session_activated( + session: Arc>, + request_header: &RequestHeader, + ) -> Result<(), SupportedMessage> { let session = trace_read_lock_unwrap!(session); if !session.is_activated() { error!("Session is not activated so request fails"); @@ -377,17 +614,24 @@ impl MessageHandler { } /// Validate the security of the call - fn validate_service_request(request: &SupportedMessage, session: Arc>, - diagnostic_key: &'static str, - action: F) - -> Option - where F: FnOnce() -> SupportedMessage + fn validate_service_request( + request: &SupportedMessage, + session: Arc>, + diagnostic_key: &'static str, + action: F, + ) -> Option + where + F: FnOnce() -> SupportedMessage, { let now = Utc::now(); let request_header = request.request_header(); - let (response, authorized) = if let Err(response) = Self::is_authentication_token_valid(session.clone(), request_header) { + let (response, authorized) = if let Err(response) = + Self::is_authentication_token_valid(session.clone(), request_header) + { (response, false) - } else if let Err(response) = Self::is_session_timed_out(session.clone(), request_header, now) { + } else if let Err(response) = + Self::is_session_timed_out(session.clone(), request_header, now) + { (response, false) } else { let response = action(); @@ -400,16 +644,26 @@ impl MessageHandler { } /// Validate the security of the call and also for an active session - fn validate_active_session_service_request(request: 
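// Sketch of the validate-then-dispatch wrapper in validate_service_request
// above: the authentication token and the session timeout are checked first and
// the service closure runs only when both pass. Stand-in types, and the fault
// names are illustrative; the real wrapper also records the outcome in the
// session diagnostics via diag_service_response.

#[derive(Debug, PartialEq)]
enum Response {
    Ok(&'static str),
    Fault(&'static str),
}

struct Session {
    token_valid: bool,
    timed_out: bool,
}

fn validate_service_request<F>(session: &Session, action: F) -> Response
where
    F: FnOnce() -> Response,
{
    if !session.token_valid {
        Response::Fault("BadIdentityTokenRejected")
    } else if session.timed_out {
        Response::Fault("BadSessionIdInvalid")
    } else {
        action()
    }
}

fn main() {
    let session = Session { token_valid: true, timed_out: false };
    let response = validate_service_request(&session, || Response::Ok("ReadResponse"));
    assert_eq!(response, Response::Ok("ReadResponse"));

    let stale = Session { token_valid: true, timed_out: true };
    let response = validate_service_request(&stale, || Response::Ok("ReadResponse"));
    assert_eq!(response, Response::Fault("BadSessionIdInvalid"));
}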
&SupportedMessage, session: Arc>, diagnostic_key: &'static str, action: F) -> Option - where F: FnOnce() -> SupportedMessage + fn validate_active_session_service_request( + request: &SupportedMessage, + session: Arc>, + diagnostic_key: &'static str, + action: F, + ) -> Option + where + F: FnOnce() -> SupportedMessage, { let now = Utc::now(); let request_header = request.request_header(); - let (response, authorized) = if let Err(response) = Self::is_authentication_token_valid(session.clone(), request_header) { + let (response, authorized) = if let Err(response) = + Self::is_authentication_token_valid(session.clone(), request_header) + { (response, false) } else if let Err(response) = Self::is_session_activated(session.clone(), request_header) { (response, false) - } else if let Err(response) = Self::is_session_timed_out(session.clone(), request_header, now) { + } else if let Err(response) = + Self::is_session_timed_out(session.clone(), request_header, now) + { (response, false) } else { let response = action(); @@ -431,7 +685,12 @@ impl MessageHandler { } /// Increment count of service call in session diagnostics - fn diag_service_response(session: Arc>, authorized: bool, response: &SupportedMessage, diagnostic_key: &'static str) { + fn diag_service_response( + session: Arc>, + authorized: bool, + response: &SupportedMessage, + diagnostic_key: &'static str, + ) { let session = trace_read_lock_unwrap!(session); let session_diagnostics = session.session_diagnostics(); let mut session_diagnostics = trace_write_lock_unwrap!(session_diagnostics); diff --git a/server/src/services/method.rs b/server/src/services/method.rs index cefba153e..86bb1770f 100644 --- a/server/src/services/method.rs +++ b/server/src/services/method.rs @@ -5,20 +5,17 @@ use std::sync::{Arc, RwLock}; use opcua_core::supported_message::SupportedMessage; -use opcua_types::{*, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, *}; -use crate::{ - address_space::AddressSpace, - services::Service, - session::Session, - state::ServerState, -}; +use crate::{address_space::AddressSpace, services::Service, session::Session, state::ServerState}; /// The method service. Allows a client to call a method on the server. 
pub(crate) struct MethodService; impl Service for MethodService { - fn name(&self) -> String { String::from("MethodService") } + fn name(&self) -> String { + String::from("MethodService") + } } impl MethodService { @@ -26,35 +23,51 @@ impl MethodService { MethodService {} } - pub fn call(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &CallRequest) -> SupportedMessage { + pub fn call( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &CallRequest, + ) -> SupportedMessage { if let Some(ref calls) = request.methods_to_call { let server_state = trace_read_lock_unwrap!(server_state); if calls.len() <= server_state.operational_limits.max_nodes_per_method_call { let mut session = trace_write_lock_unwrap!(session); let mut address_space = trace_write_lock_unwrap!(address_space); - let results: Vec = calls.iter().map(|request| { - trace!("Calling to {:?} on {:?}", request.method_id, request.object_id); + let results: Vec = calls + .iter() + .map(|request| { + trace!( + "Calling to {:?} on {:?}", + request.method_id, + request.object_id + ); - // Note: Method invocations that modify the address space, write a value, or modify the - // state of the system (acknowledge, batch sequencing or other system changes) must - // generate an AuditUpdateMethodEventType or a subtype of it. + // Note: Method invocations that modify the address space, write a value, or modify the + // state of the system (acknowledge, batch sequencing or other system changes) must + // generate an AuditUpdateMethodEventType or a subtype of it. - // Call the method via whatever is registered in the address space - match address_space.call_method(&server_state, &mut session, request) { - Ok(response) => response, - Err(status_code) => { - // Call didn't work for some reason - error!("Call to {:?} on {:?} failed with status code {}", request.method_id, request.object_id, status_code); - CallMethodResult { - status_code, - input_argument_results: None, - input_argument_diagnostic_infos: None, - output_arguments: None, + // Call the method via whatever is registered in the address space + match address_space.call_method(&server_state, &mut session, request) { + Ok(response) => response, + Err(status_code) => { + // Call didn't work for some reason + error!( + "Call to {:?} on {:?} failed with status code {}", + request.method_id, request.object_id, status_code + ); + CallMethodResult { + status_code, + input_argument_results: None, + input_argument_diagnostic_infos: None, + output_arguments: None, + } } } - } - }).collect(); + }) + .collect(); // Produce response let response = CallResponse { response_header: ResponseHeader::new_good(&request.request_header), diff --git a/server/src/services/mod.rs b/server/src/services/mod.rs index 449725bfe..080b5bdfc 100644 --- a/server/src/services/mod.rs +++ b/server/src/services/mod.rs @@ -3,7 +3,7 @@ // Copyright (C) 2017-2020 Adam Lock use opcua_core::supported_message::SupportedMessage; -use opcua_types::{RequestHeader, ServiceFault, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, RequestHeader, ServiceFault}; pub mod message_handler; @@ -11,8 +11,17 @@ pub mod message_handler; trait Service { fn name(&self) -> String; - fn service_fault(&self, request_header: &RequestHeader, service_result: StatusCode) -> SupportedMessage { - warn!("Service {}, request handle {} generated a service fault with status code {}", self.name(), request_header.request_handle, service_result); + fn service_fault( + &self, + request_header: 
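// Sketch of the per-call error handling in MethodService::call above: a failed
// method invocation is folded into a CallMethodResult carrying its status code,
// so one bad call never fails the whole CallRequest. The method table and types
// here are hypothetical stand-ins for the address space dispatch.

#[derive(Debug, PartialEq)]
enum StatusCode {
    Good,
    BadMethodInvalid,
}

#[derive(Debug)]
struct CallMethodResult {
    status_code: StatusCode,
    output_arguments: Option<Vec<i32>>,
}

fn call_method(method_id: &str) -> Result<Vec<i32>, StatusCode> {
    match method_id {
        "ns=2;s=Add" => Ok(vec![3]),
        _ => Err(StatusCode::BadMethodInvalid),
    }
}

fn call_all(method_ids: &[&str]) -> Vec<CallMethodResult> {
    method_ids
        .iter()
        .map(|id| match call_method(id) {
            Ok(output_arguments) => CallMethodResult {
                status_code: StatusCode::Good,
                output_arguments: Some(output_arguments),
            },
            Err(status_code) => CallMethodResult {
                status_code,
                output_arguments: None,
            },
        })
        .collect()
}

fn main() {
    let results = call_all(&["ns=2;s=Add", "ns=2;s=Missing"]);
    assert_eq!(results[0].status_code, StatusCode::Good);
    assert_eq!(results[1].status_code, StatusCode::BadMethodInvalid);
}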
&RequestHeader, + service_result: StatusCode, + ) -> SupportedMessage { + warn!( + "Service {}, request handle {} generated a service fault with status code {}", + self.name(), + request_header.request_handle, + service_result + ); ServiceFault::new(request_header, service_result).into() } } diff --git a/server/src/services/monitored_item.rs b/server/src/services/monitored_item.rs index 1b05553e0..f4efdad8b 100644 --- a/server/src/services/monitored_item.rs +++ b/server/src/services/monitored_item.rs @@ -5,19 +5,17 @@ use std::sync::{Arc, RwLock}; use opcua_core::supported_message::SupportedMessage; -use opcua_types::{*, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, *}; -use crate::{ - address_space::AddressSpace, - services::Service, - session::Session, state::ServerState, -}; +use crate::{address_space::AddressSpace, services::Service, session::Session, state::ServerState}; /// The monitored item service. Allows client to create, modify and delete monitored items on a subscription. pub(crate) struct MonitoredItemService; impl Service for MonitoredItemService { - fn name(&self) -> String { String::from("MonitoredItemService") } + fn name(&self) -> String { + String::from("MonitoredItemService") + } } impl MonitoredItemService { @@ -26,7 +24,13 @@ impl MonitoredItemService { } /// Implementation of CreateMonitoredItems service. See OPC Unified Architecture, Part 4 5.12.2 - pub fn create_monitored_items(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &CreateMonitoredItemsRequest) -> SupportedMessage { + pub fn create_monitored_items( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &CreateMonitoredItemsRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.items_to_create) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -36,9 +40,16 @@ impl MonitoredItemService { let items_to_create = request.items_to_create.as_ref().unwrap(); // Find subscription and add items to it - if let Some(subscription) = session.subscriptions_mut().get_mut(request.subscription_id) { + if let Some(subscription) = session.subscriptions_mut().get_mut(request.subscription_id) + { let now = chrono::Utc::now(); - let results = Some(subscription.create_monitored_items(&address_space, &now, request.timestamps_to_return, items_to_create, server_state.max_monitored_items_per_sub)); + let results = Some(subscription.create_monitored_items( + &address_space, + &now, + request.timestamps_to_return, + items_to_create, + server_state.max_monitored_items_per_sub, + )); let response = CreateMonitoredItemsResponse { response_header: ResponseHeader::new_good(&request.request_header), results, @@ -47,13 +58,21 @@ impl MonitoredItemService { response.into() } else { // No matching subscription - self.service_fault(&request.request_header, StatusCode::BadSubscriptionIdInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadSubscriptionIdInvalid, + ) } } } /// Implementation of ModifyMonitoredItems service. 
See OPC Unified Architecture, Part 4 5.12.3 - pub fn modify_monitored_items(&self, session: Arc>, address_space: Arc>, request: &ModifyMonitoredItemsRequest) -> SupportedMessage { + pub fn modify_monitored_items( + &self, + session: Arc>, + address_space: Arc>, + request: &ModifyMonitoredItemsRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.items_to_modify) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -63,21 +82,33 @@ impl MonitoredItemService { // Find subscription and modify items in it let subscription_id = request.subscription_id; if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { - let results = Some(subscription.modify_monitored_items(&address_space, request.timestamps_to_return, items_to_modify)); + let results = Some(subscription.modify_monitored_items( + &address_space, + request.timestamps_to_return, + items_to_modify, + )); ModifyMonitoredItemsResponse { response_header: ResponseHeader::new_good(&request.request_header), results, diagnostic_infos: None, - }.into() + } + .into() } else { // No matching subscription - self.service_fault(&request.request_header, StatusCode::BadSubscriptionIdInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadSubscriptionIdInvalid, + ) } } } /// Implementation of SetMonitoringMode service. See OPC Unified Architecture, Part 4 5.12.4 - pub fn set_monitoring_mode(&self, session: Arc>, request: &SetMonitoringModeRequest) -> SupportedMessage { + pub fn set_monitoring_mode( + &self, + session: Arc>, + request: &SetMonitoringModeRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.monitored_item_ids) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -86,61 +117,89 @@ impl MonitoredItemService { let subscription_id = request.subscription_id; if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { let monitoring_mode = request.monitoring_mode; - let results = monitored_item_ids.iter().map(|i| { - subscription.set_monitoring_mode(*i, monitoring_mode) - }).collect(); + let results = monitored_item_ids + .iter() + .map(|i| subscription.set_monitoring_mode(*i, monitoring_mode)) + .collect(); SetMonitoringModeResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), diagnostic_infos: None, - }.into() + } + .into() } else { - self.service_fault(&request.request_header, StatusCode::BadSubscriptionIdInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadSubscriptionIdInvalid, + ) } } } /// Implementation of SetTriggering service. See OPC Unified Architecture, Part 4 5.12.5 - pub fn set_triggering(&self, session: Arc>, request: &SetTriggeringRequest) -> SupportedMessage { - if is_empty_option_vec!(request.links_to_add) && is_empty_option_vec!(request.links_to_remove) { + pub fn set_triggering( + &self, + session: Arc>, + request: &SetTriggeringRequest, + ) -> SupportedMessage { + if is_empty_option_vec!(request.links_to_add) + && is_empty_option_vec!(request.links_to_remove) + { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { let mut session = trace_write_lock_unwrap!(session); let links_to_add = match request.links_to_add { Some(ref links_to_add) => &links_to_add[..], - None => &[] + None => &[], }; let links_to_remove = match request.links_to_remove { Some(ref links_to_remove) => &links_to_remove[..], - None => &[] + None => &[], }; // Set the triggering on the subscription. 
let subscription_id = request.subscription_id; if let Some(subscription) = session.subscriptions_mut().get_mut(subscription_id) { - match subscription.set_triggering(request.triggering_item_id, links_to_add, links_to_remove) { + match subscription.set_triggering( + request.triggering_item_id, + links_to_add, + links_to_remove, + ) { Ok((add_results, remove_results)) => { let response = SetTriggeringResponse { response_header: ResponseHeader::new_good(&request.request_header), - add_results: if request.links_to_add.is_some() { Some(add_results) } else { None }, + add_results: if request.links_to_add.is_some() { + Some(add_results) + } else { + None + }, add_diagnostic_infos: None, - remove_results: if request.links_to_remove.is_some() { Some(remove_results) } else { None }, + remove_results: if request.links_to_remove.is_some() { + Some(remove_results) + } else { + None + }, remove_diagnostic_infos: None, }; response.into() } - Err(err) => { - self.service_fault(&request.request_header, err) - } + Err(err) => self.service_fault(&request.request_header, err), } } else { - self.service_fault(&request.request_header, StatusCode::BadSubscriptionIdInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadSubscriptionIdInvalid, + ) } } } /// Implementation of DeleteMonitoredItems service. See OPC Unified Architecture, Part 4 5.12.6 - pub fn delete_monitored_items(&self, session: Arc>, request: &DeleteMonitoredItemsRequest) -> SupportedMessage { + pub fn delete_monitored_items( + &self, + session: Arc>, + request: &DeleteMonitoredItemsRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.monitored_item_ids) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -159,7 +218,10 @@ impl MonitoredItemService { response.into() } else { // No matching subscription - self.service_fault(&request.request_header, StatusCode::BadSubscriptionIdInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadSubscriptionIdInvalid, + ) } } } diff --git a/server/src/services/node_management.rs b/server/src/services/node_management.rs index 804e34e01..a326250a0 100644 --- a/server/src/services/node_management.rs +++ b/server/src/services/node_management.rs @@ -8,18 +8,10 @@ use std::{ }; use opcua_core::supported_message::SupportedMessage; -use opcua_types::{ - *, - node_ids::ObjectId, - status_code::StatusCode, -}; +use opcua_types::{node_ids::ObjectId, status_code::StatusCode, *}; use crate::{ - address_space::{ - AddressSpace, - relative_path, - types::*, - }, + address_space::{relative_path, types::*, AddressSpace}, services::Service, session::Session, state::ServerState, @@ -28,7 +20,9 @@ use crate::{ pub(crate) struct NodeManagementService; impl Service for NodeManagementService { - fn name(&self) -> String { String::from("NodeManagementService") } + fn name(&self) -> String { + String::from("NodeManagementService") + } } impl NodeManagementService { @@ -37,23 +31,41 @@ impl NodeManagementService { } /// Implements the AddNodes service - pub fn add_nodes(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &AddNodesRequest) -> SupportedMessage { + pub fn add_nodes( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &AddNodesRequest, + ) -> SupportedMessage { // TODO audit - generate AuditAddNodesEventType event if let Some(ref nodes_to_add) = request.nodes_to_add { if !nodes_to_add.is_empty() { let server_state = trace_read_lock_unwrap!(server_state); - if nodes_to_add.len() <= 
server_state.operational_limits.max_nodes_per_node_management { + if nodes_to_add.len() + <= server_state + .operational_limits + .max_nodes_per_node_management + { let session = trace_read_lock_unwrap!(session); let mut address_space = trace_write_lock_unwrap!(address_space); let decoding_limits = server_state.decoding_limits(); - let results = nodes_to_add.iter().map(|node_to_add| { - let (status_code, added_node_id) = Self::add_node(&session, &mut address_space, node_to_add, &decoding_limits); - AddNodesResult { - status_code, - added_node_id, - } - }).collect(); + let results = nodes_to_add + .iter() + .map(|node_to_add| { + let (status_code, added_node_id) = Self::add_node( + &session, + &mut address_space, + node_to_add, + &decoding_limits, + ); + AddNodesResult { + status_code, + added_node_id, + } + }) + .collect(); let response = AddNodesResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), @@ -72,22 +84,34 @@ impl NodeManagementService { } /// Implements the AddReferences service - pub fn add_references(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &AddReferencesRequest) -> SupportedMessage { + pub fn add_references( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &AddReferencesRequest, + ) -> SupportedMessage { // TODO audit - generate AuditAddReferencesEventType event if let Some(ref references_to_add) = request.references_to_add { if !references_to_add.is_empty() { let server_state = trace_read_lock_unwrap!(server_state); - if references_to_add.len() <= server_state.operational_limits.max_nodes_per_node_management { + if references_to_add.len() + <= server_state + .operational_limits + .max_nodes_per_node_management + { let session = trace_read_lock_unwrap!(session); let mut address_space = trace_write_lock_unwrap!(address_space); - let results = references_to_add.iter().map(|r| { - Self::add_reference(&session, &mut address_space, r) - }).collect(); + let results = references_to_add + .iter() + .map(|r| Self::add_reference(&session, &mut address_space, r)) + .collect(); AddReferencesResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), diagnostic_infos: None, - }.into() + } + .into() } else { self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } @@ -100,17 +124,30 @@ impl NodeManagementService { } /// Implements the DeleteNodes service - pub fn delete_nodes(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &DeleteNodesRequest) -> SupportedMessage { + pub fn delete_nodes( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &DeleteNodesRequest, + ) -> SupportedMessage { // TODO audit - generate AuditDeleteNodesEventType event if let Some(ref nodes_to_delete) = request.nodes_to_delete { if !nodes_to_delete.is_empty() { let server_state = trace_read_lock_unwrap!(server_state); - if nodes_to_delete.len() <= server_state.operational_limits.max_nodes_per_node_management { + if nodes_to_delete.len() + <= server_state + .operational_limits + .max_nodes_per_node_management + { let session = trace_read_lock_unwrap!(session); let mut address_space = trace_write_lock_unwrap!(address_space); - let results = nodes_to_delete.iter().map(|node_to_delete| { - Self::delete_node(&session, &mut address_space, node_to_delete) - }).collect(); + let results = nodes_to_delete + .iter() + .map(|node_to_delete| { + Self::delete_node(&session, &mut address_space, 
node_to_delete) + }) + .collect(); let response = DeleteNodesResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), @@ -129,22 +166,34 @@ impl NodeManagementService { } /// Implements the DeleteReferences service - pub fn delete_references(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &DeleteReferencesRequest) -> SupportedMessage { + pub fn delete_references( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &DeleteReferencesRequest, + ) -> SupportedMessage { // TODO audit - generate AuditDeleteReferencesEventType event if let Some(ref references_to_delete) = request.references_to_delete { if !references_to_delete.is_empty() { let server_state = trace_read_lock_unwrap!(server_state); - if references_to_delete.len() <= server_state.operational_limits.max_nodes_per_node_management { + if references_to_delete.len() + <= server_state + .operational_limits + .max_nodes_per_node_management + { let session = trace_read_lock_unwrap!(session); let mut address_space = trace_write_lock_unwrap!(address_space); - let results = references_to_delete.iter().map(|r| { - Self::delete_reference(&session, &mut address_space, r) - }).collect(); + let results = references_to_delete + .iter() + .map(|r| Self::delete_reference(&session, &mut address_space, r)) + .collect(); DeleteReferencesResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), diagnostic_infos: None, - }.into() + } + .into() } else { self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } @@ -156,8 +205,17 @@ impl NodeManagementService { } } - fn create_node(node_id: &NodeId, node_class: NodeClass, browse_name: QualifiedName, node_attributes: &ExtensionObject, decoding_limits: &DecodingLimits) -> Result { - let object_id = node_attributes.node_id.as_object_id().map_err(|_| StatusCode::BadNodeAttributesInvalid)?; + fn create_node( + node_id: &NodeId, + node_class: NodeClass, + browse_name: QualifiedName, + node_attributes: &ExtensionObject, + decoding_limits: &DecodingLimits, + ) -> Result { + let object_id = node_attributes + .node_id + .as_object_id() + .map_err(|_| StatusCode::BadNodeAttributesInvalid)?; // Note we are expecting the node_class and the object id for the attributes to be for the same // thing. If they are different, it is an error. 
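Note: every node-management entry point above applies the same guard before touching the address space: a missing or empty operation list yields BadNothingToDo, and a list longer than the server's configured operational limit yields BadTooManyOperations. A minimal standalone sketch of that guard, outside the patch itself (the helper name is illustrative and not crate API; only the StatusCode variants are taken from the code above):

    use opcua_types::status_code::StatusCode;

    // Reject an operation list that is absent, empty, or over the per-request limit,
    // mirroring the checks in add_nodes / add_references / delete_nodes / delete_references.
    fn check_operation_count<T>(ops: Option<&Vec<T>>, limit: usize) -> Result<(), StatusCode> {
        match ops {
            None => Err(StatusCode::BadNothingToDo),
            Some(ops) if ops.is_empty() => Err(StatusCode::BadNothingToDo),
            Some(ops) if ops.len() > limit => Err(StatusCode::BadTooManyOperations),
            Some(_) => Ok(()),
        }
    }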
@@ -241,7 +299,12 @@ impl NodeManagementService { }.map_err(|_| StatusCode::BadNodeAttributesInvalid) } - fn add_node(session: &Session, address_space: &mut AddressSpace, item: &AddNodesItem, decoding_limits: &DecodingLimits) -> (StatusCode, NodeId) { + fn add_node( + session: &Session, + address_space: &mut AddressSpace, + item: &AddNodesItem, + decoding_limits: &DecodingLimits, + ) -> (StatusCode, NodeId) { if !session.can_modify_address_space() { // No permission to modify address space return (StatusCode::BadUserAccessDenied, NodeId::null()); @@ -275,12 +338,22 @@ impl NodeManagementService { // Test duplicate browse name to same parent let browse_name = if item.browse_name.namespace_index != 0 { - format!("{}:{}", item.browse_name.namespace_index, item.browse_name.name.as_ref()) + format!( + "{}:{}", + item.browse_name.namespace_index, + item.browse_name.name.as_ref() + ) } else { format!("/{}", item.browse_name.name.as_ref()) }; - let relative_path = RelativePath::from_str(&browse_name, &RelativePathElement::default_node_resolver).unwrap(); - if let Ok(nodes) = relative_path::find_nodes_relative_path(address_space, &item.parent_node_id.node_id, &relative_path) { + let relative_path = + RelativePath::from_str(&browse_name, &RelativePathElement::default_node_resolver) + .unwrap(); + if let Ok(nodes) = relative_path::find_nodes_relative_path( + address_space, + &item.parent_node_id.node_id, + &relative_path, + ) { if !nodes.is_empty() { error!("node cannot be created because the browse name is a duplicate"); return (StatusCode::BadBrowseNameDuplicated, NodeId::null()); @@ -299,24 +372,41 @@ impl NodeManagementService { // BadReferenceNotAllowed // Check the type definition is valid - if !address_space.is_valid_type_definition(item.node_class, &item.type_definition.node_id) { + if !address_space + .is_valid_type_definition(item.node_class, &item.type_definition.node_id) + { // Type definition was either invalid or supplied when it should not have been supplied error!("node cannot be created because type definition is not valid"); return (StatusCode::BadTypeDefinitionInvalid, NodeId::null()); } // Check that the parent node exists - if !item.parent_node_id.server_index == 0 || !address_space.node_exists(&item.parent_node_id.node_id) { - error!("node cannot be created because parent node id is invalid or does not exist"); + if !item.parent_node_id.server_index == 0 + || !address_space.node_exists(&item.parent_node_id.node_id) + { + error!( + "node cannot be created because parent node id is invalid or does not exist" + ); return (StatusCode::BadParentNodeIdInvalid, NodeId::null()); } // Create a node - if let Ok(node) = Self::create_node(&new_node_id, item.node_class, item.browse_name.clone(), &item.node_attributes, decoding_limits) { + if let Ok(node) = Self::create_node( + &new_node_id, + item.node_class, + item.browse_name.clone(), + &item.node_attributes, + decoding_limits, + ) { // Add the node to the address space - address_space.insert(node, Some(&[ - (&item.parent_node_id.node_id, &reference_type_id, ReferenceDirection::Forward), - ])); + address_space.insert( + node, + Some(&[( + &item.parent_node_id.node_id, + &reference_type_id, + ReferenceDirection::Forward, + )]), + ); // Object / Variable types must add a reference to the type if item.node_class == NodeClass::Object || item.node_class == NodeClass::Variable { address_space.set_node_type(&new_node_id, item.type_definition.node_id.clone()); @@ -333,7 +423,11 @@ impl NodeManagementService { } } - fn add_reference(session: 
&Session, address_space: &mut AddressSpace, item: &AddReferencesItem) -> StatusCode { + fn add_reference( + session: &Session, + address_space: &mut AddressSpace, + item: &AddReferencesItem, + ) -> StatusCode { if !session.can_modify_address_space() { // No permission to modify address space StatusCode::BadUserAccessDenied @@ -355,13 +449,25 @@ impl NodeManagementService { } } if let Ok(reference_type_id) = item.reference_type_id.as_reference_type_id() { - if !address_space.has_reference(&item.source_node_id, &item.target_node_id.node_id, reference_type_id) { + if !address_space.has_reference( + &item.source_node_id, + &item.target_node_id.node_id, + reference_type_id, + ) { // TODO test data model constraint // BadReferenceNotAllowed if item.is_forward { - address_space.insert_reference(&item.source_node_id, &item.target_node_id.node_id, reference_type_id); + address_space.insert_reference( + &item.source_node_id, + &item.target_node_id.node_id, + reference_type_id, + ); } else { - address_space.insert_reference(&item.target_node_id.node_id, &item.source_node_id, reference_type_id); + address_space.insert_reference( + &item.target_node_id.node_id, + &item.source_node_id, + reference_type_id, + ); } StatusCode::Good } else { @@ -375,7 +481,11 @@ impl NodeManagementService { } } - fn delete_node(session: &Session, address_space: &mut AddressSpace, item: &DeleteNodesItem) -> StatusCode { + fn delete_node( + session: &Session, + address_space: &mut AddressSpace, + item: &DeleteNodesItem, + ) -> StatusCode { if !session.can_modify_address_space() { // No permission to modify address space StatusCode::BadUserAccessDenied @@ -387,7 +497,11 @@ impl NodeManagementService { } } - fn delete_reference(session: &Session, address_space: &mut AddressSpace, item: &DeleteReferencesItem) -> StatusCode { + fn delete_reference( + session: &Session, + address_space: &mut AddressSpace, + item: &DeleteReferencesItem, + ) -> StatusCode { let node_id = &item.source_node_id; let target_node_id = &item.target_node_id.node_id; diff --git a/server/src/services/query.rs b/server/src/services/query.rs index 8525abc66..41dd95b99 100644 --- a/server/src/services/query.rs +++ b/server/src/services/query.rs @@ -5,23 +5,17 @@ use std::sync::{Arc, RwLock}; use opcua_core::supported_message::SupportedMessage; -use opcua_types::{ - *, - status_code::StatusCode, -}; - -use crate::{ - address_space::AddressSpace, - services::Service, - session::Session, - state::ServerState, -}; +use opcua_types::{status_code::StatusCode, *}; + +use crate::{address_space::AddressSpace, services::Service, session::Session, state::ServerState}; /// The view service. Allows the client to browse the address space of the server. 
pub(crate) struct QueryService; impl Service for QueryService { - fn name(&self) -> String { String::from("QueryService") } + fn name(&self) -> String { + String::from("QueryService") + } } impl QueryService { @@ -29,11 +23,23 @@ impl QueryService { QueryService {} } - pub fn query_first(&self, _server_state: Arc>, _session: Arc>, _address_space: Arc>, request: &QueryFirstRequest) -> SupportedMessage { + pub fn query_first( + &self, + _server_state: Arc>, + _session: Arc>, + _address_space: Arc>, + request: &QueryFirstRequest, + ) -> SupportedMessage { self.service_fault(&request.request_header, StatusCode::BadNotSupported) } - pub fn query_next(&self, _server_state: Arc>, _session: Arc>, _address_space: Arc>, request: &QueryNextRequest) -> SupportedMessage { + pub fn query_next( + &self, + _server_state: Arc>, + _session: Arc>, + _address_space: Arc>, + request: &QueryNextRequest, + ) -> SupportedMessage { self.service_fault(&request.request_header, StatusCode::BadNotSupported) } -} \ No newline at end of file +} diff --git a/server/src/services/session.rs b/server/src/services/session.rs index f1fd4a986..59f3ac84f 100644 --- a/server/src/services/session.rs +++ b/server/src/services/session.rs @@ -5,10 +5,8 @@ use std::sync::{Arc, RwLock}; use opcua_core::supported_message::SupportedMessage; -use opcua_crypto::{self as crypto, CertificateStore, random, SecurityPolicy}; -use opcua_types::{ - *, status_code::StatusCode, -}; +use opcua_crypto::{self as crypto, random, CertificateStore, SecurityPolicy}; +use opcua_types::{status_code::StatusCode, *}; use crate::{ address_space::address_space::AddressSpace, @@ -23,7 +21,9 @@ use crate::{ pub(crate) struct SessionService; impl Service for SessionService { - fn name(&self) -> String { String::from("SessionService") } + fn name(&self) -> String { + String::from("SessionService") + } } impl SessionService { @@ -31,7 +31,14 @@ impl SessionService { SessionService {} } - pub fn create_session(&self, certificate_store: &CertificateStore, server_state: Arc>, session: Arc>, address_space: Arc>, request: &CreateSessionRequest) -> SupportedMessage { + pub fn create_session( + &self, + certificate_store: &CertificateStore, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &CreateSessionRequest, + ) -> SupportedMessage { let server_state = trace_write_lock_unwrap!(server_state); let mut session = trace_write_lock_unwrap!(session); @@ -65,7 +72,8 @@ impl SessionService { let endpoints = endpoints.unwrap(); // Extract the client certificate if one is supplied - let client_certificate = crypto::X509::from_byte_string(&request.client_certificate).ok(); + let client_certificate = + crypto::X509::from_byte_string(&request.client_certificate).ok(); // Check the client's certificate for validity and acceptance let security_policy = { @@ -75,14 +83,24 @@ impl SessionService { }; let service_result = if security_policy != SecurityPolicy::None { let result = if let Some(ref client_certificate) = client_certificate { - certificate_store.validate_or_reject_application_instance_cert(client_certificate, security_policy, None, None) + certificate_store.validate_or_reject_application_instance_cert( + client_certificate, + security_policy, + None, + None, + ) } else { warn!("Certificate supplied by client is invalid"); StatusCode::BadCertificateInvalid }; if result.is_bad() { // Log an error - audit::log_certificate_error(&server_state, address_space.clone(), result, &request.request_header); + audit::log_certificate_error( + &server_state, + 
address_space.clone(), + result, + &request.request_header, + ); // Rejected for security reasons let mut diagnostics = trace_write_lock_unwrap!(server_state.diagnostics); @@ -94,14 +112,22 @@ impl SessionService { }; if service_result.is_bad() { - audit::log_create_session(&server_state, &session, address_space, false, 0f64, request); + audit::log_create_session( + &server_state, + &session, + address_space, + false, + 0f64, + request, + ); self.service_fault(&request.request_header, service_result) } else { - let session_timeout = if request.requested_session_timeout > constants::MAX_SESSION_TIMEOUT { - constants::MAX_SESSION_TIMEOUT - } else { - request.requested_session_timeout - }; + let session_timeout = + if request.requested_session_timeout > constants::MAX_SESSION_TIMEOUT { + constants::MAX_SESSION_TIMEOUT + } else { + request.requested_session_timeout + }; let max_request_message_size = constants::MAX_REQUEST_MESSAGE_SIZE; @@ -131,7 +157,14 @@ impl SessionService { session.set_session_nonce(server_nonce.clone()); session.set_session_name(request.session_name.clone()); - audit::log_create_session(&server_state, &session, address_space.clone(), true, session_timeout, request); + audit::log_create_session( + &server_state, + &session, + address_space.clone(), + true, + session_timeout, + request, + ); // Create a session id in the address space session.register_session(address_space); @@ -147,12 +180,19 @@ impl SessionService { server_software_certificates: None, server_signature, max_request_message_size, - }.into() + } + .into() } } } - pub fn activate_session(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &ActivateSessionRequest) -> SupportedMessage { + pub fn activate_session( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &ActivateSessionRequest, + ) -> SupportedMessage { let server_state = trace_write_lock_unwrap!(server_state); let mut session = trace_write_lock_unwrap!(session); let endpoint_url = session.endpoint_url().as_ref(); @@ -160,26 +200,40 @@ impl SessionService { let (security_policy, security_mode) = { let secure_channel = session.secure_channel(); let secure_channel = trace_read_lock_unwrap!(secure_channel); - (secure_channel.security_policy(), secure_channel.security_mode()) + ( + secure_channel.security_policy(), + secure_channel.security_mode(), + ) }; let server_nonce = security_policy.random_nonce(); - let mut service_result = if !server_state.endpoint_exists(endpoint_url, security_policy, security_mode) { - // Need an endpoint - error!("Endpoint does not exist for requested url & mode {}, {:?} / {:?}", endpoint_url, security_policy, security_mode); - StatusCode::BadTcpEndpointUrlInvalid - } else if security_policy != SecurityPolicy::None { - // Crypto see 5.6.3.1 verify the caller is the same caller as create_session by validating - // signature supplied by the client during the create. 
- Self::verify_client_signature(&server_state, &session, &request.client_signature) - } else { - // No cert checks for no security - StatusCode::Good - }; + let mut service_result = + if !server_state.endpoint_exists(endpoint_url, security_policy, security_mode) { + // Need an endpoint + error!( + "Endpoint does not exist for requested url & mode {}, {:?} / {:?}", + endpoint_url, security_policy, security_mode + ); + StatusCode::BadTcpEndpointUrlInvalid + } else if security_policy != SecurityPolicy::None { + // Crypto see 5.6.3.1 verify the caller is the same caller as create_session by validating + // signature supplied by the client during the create. + Self::verify_client_signature(&server_state, &session, &request.client_signature) + } else { + // No cert checks for no security + StatusCode::Good + }; if service_result.is_good() { - if let Err(err) = server_state.authenticate_endpoint(request, endpoint_url, security_policy, security_mode, &request.user_identity_token, session.session_nonce()) { + if let Err(err) = server_state.authenticate_endpoint( + request, + endpoint_url, + security_policy, + security_mode, + &request.user_identity_token, + session.session_nonce(), + ) { service_result = err; } } @@ -188,7 +242,10 @@ impl SessionService { if service_result.is_good() { session.set_activated(true); session.set_session_nonce(server_nonce); - session.set_user_identity(IdentityToken::new(&request.user_identity_token, &server_state.decoding_limits())); + session.set_user_identity(IdentityToken::new( + &request.user_identity_token, + &server_state.decoding_limits(), + )); session.set_locale_ids(request.locale_ids.clone()); let diagnostic_infos = None; @@ -200,14 +257,21 @@ impl SessionService { server_nonce: session.session_nonce().clone(), results: None, diagnostic_infos, - }.into() + } + .into() } else { session.set_activated(false); self.service_fault(&request.request_header, service_result) } } - pub fn close_session(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &CloseSessionRequest) -> SupportedMessage { + pub fn close_session( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &CloseSessionRequest, + ) -> SupportedMessage { let server_state = trace_write_lock_unwrap!(server_state); let mut session = trace_write_lock_unwrap!(session); session.set_authentication_token(NodeId::null()); @@ -218,20 +282,31 @@ impl SessionService { CloseSessionResponse { response_header: ResponseHeader::new_good(&request.request_header), - }.into() + } + .into() } - pub fn cancel(&self, _server_state: Arc>, _session: Arc>, request: &CancelRequest) -> SupportedMessage { + pub fn cancel( + &self, + _server_state: Arc>, + _session: Arc>, + request: &CancelRequest, + ) -> SupportedMessage { // This service call currently does nothing CancelResponse { response_header: ResponseHeader::new_good(&request.request_header), cancel_count: 0, - }.into() + } + .into() } /// Verifies that the supplied client signature was produced by the session's client certificate /// from the server's certificate and nonce. 
- fn verify_client_signature(server_state: &ServerState, session: &Session, client_signature: &SignatureData) -> StatusCode { + fn verify_client_signature( + server_state: &ServerState, + session: &Session, + client_signature: &SignatureData, + ) -> StatusCode { if let Some(ref client_certificate) = session.client_certificate() { if let Some(ref server_certificate) = server_state.server_certificate { let security_policy = { @@ -239,7 +314,13 @@ impl SessionService { let secure_channel = trace_read_lock_unwrap!(secure_channel); secure_channel.security_policy() }; - crypto::verify_signature_data(client_signature, security_policy, client_certificate, server_certificate, session.session_nonce().as_ref()) + crypto::verify_signature_data( + client_signature, + security_policy, + client_certificate, + server_certificate, + session.session_nonce().as_ref(), + ) } else { error!("Client signature verification failed, server has no server certificate"); StatusCode::BadUnexpectedError @@ -249,4 +330,4 @@ impl SessionService { StatusCode::BadUnexpectedError } } -} \ No newline at end of file +} diff --git a/server/src/services/subscription.rs b/server/src/services/subscription.rs index aea936bf5..44e828e75 100644 --- a/server/src/services/subscription.rs +++ b/server/src/services/subscription.rs @@ -5,13 +5,10 @@ use std::sync::{Arc, RwLock}; use opcua_core::supported_message::SupportedMessage; -use opcua_types::{*, status_code::StatusCode}; +use opcua_types::{status_code::StatusCode, *}; use crate::{ - address_space::AddressSpace, - services::Service, - session::Session, - state::ServerState, + address_space::AddressSpace, services::Service, session::Session, state::ServerState, subscriptions::subscription::Subscription, }; @@ -20,7 +17,9 @@ use crate::{ pub(crate) struct SubscriptionService; impl Service for SubscriptionService { - fn name(&self) -> String { String::from("SubscriptionService") } + fn name(&self) -> String { + String::from("SubscriptionService") + } } impl SubscriptionService { @@ -29,20 +28,32 @@ impl SubscriptionService { } /// Handles a CreateSubscriptionRequest - pub fn create_subscription(&self, server_state: Arc>, session: Arc>, request: &CreateSubscriptionRequest) -> SupportedMessage { + pub fn create_subscription( + &self, + server_state: Arc>, + session: Arc>, + request: &CreateSubscriptionRequest, + ) -> SupportedMessage { let mut server_state = trace_write_lock_unwrap!(server_state); let mut session = trace_write_lock_unwrap!(session); let subscriptions = session.subscriptions_mut(); - if server_state.max_subscriptions > 0 && subscriptions.len() >= server_state.max_subscriptions { + if server_state.max_subscriptions > 0 + && subscriptions.len() >= server_state.max_subscriptions + { self.service_fault(&request.request_header, StatusCode::BadTooManySubscriptions) } else { let subscription_id = server_state.create_subscription_id(); // Check the requested publishing interval and keep alive values let (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) = - Self::revise_subscription_values(&server_state, request.requested_publishing_interval, request.requested_max_keep_alive_count, request.requested_lifetime_count); + Self::revise_subscription_values( + &server_state, + request.requested_publishing_interval, + request.requested_max_keep_alive_count, + request.requested_lifetime_count, + ); // Create a new subscription let publishing_enabled = request.publishing_enabled; @@ -53,7 +64,8 @@ impl SubscriptionService { revised_publishing_interval, 
revised_lifetime_count, revised_max_keep_alive_count, - request.priority); + request.priority, + ); subscriptions.insert(subscription_id, subscription); // Create the response @@ -63,12 +75,18 @@ impl SubscriptionService { revised_publishing_interval, revised_lifetime_count, revised_max_keep_alive_count, - }.into() + } + .into() } } /// Handles a ModifySubscriptionRequest - pub fn modify_subscription(&self, server_state: Arc>, session: Arc>, request: &ModifySubscriptionRequest) -> SupportedMessage { + pub fn modify_subscription( + &self, + server_state: Arc>, + session: Arc>, + request: &ModifySubscriptionRequest, + ) -> SupportedMessage { let server_state = trace_write_lock_unwrap!(server_state); let mut session = trace_write_lock_unwrap!(session); @@ -76,12 +94,20 @@ impl SubscriptionService { let subscription_id = request.subscription_id; if !subscriptions.contains(subscription_id) { - self.service_fault(&request.request_header, StatusCode::BadSubscriptionIdInvalid) + self.service_fault( + &request.request_header, + StatusCode::BadSubscriptionIdInvalid, + ) } else { let subscription = subscriptions.get_mut(subscription_id).unwrap(); let (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) = - SubscriptionService::revise_subscription_values(&server_state, request.requested_publishing_interval, request.requested_max_keep_alive_count, request.requested_lifetime_count); + SubscriptionService::revise_subscription_values( + &server_state, + request.requested_publishing_interval, + request.requested_max_keep_alive_count, + request.requested_lifetime_count, + ); subscription.set_publishing_interval(revised_publishing_interval); subscription.set_max_keep_alive_count(revised_max_keep_alive_count); @@ -96,12 +122,17 @@ impl SubscriptionService { revised_publishing_interval, revised_lifetime_count, revised_max_keep_alive_count, - }.into() + } + .into() } } /// Implementation of SetPublishingModeRequest service. See OPC Unified Architecture, Part 4 5.13.4 - pub fn set_publishing_mode(&self, session: Arc>, request: &SetPublishingModeRequest) -> SupportedMessage { + pub fn set_publishing_mode( + &self, + session: Arc>, + request: &SetPublishingModeRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.subscription_ids) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -127,12 +158,17 @@ impl SubscriptionService { response_header: ResponseHeader::new_good(&request.request_header), results, diagnostic_infos, - }.into() + } + .into() } } /// Handles a TransferSubscriptionsRequest - pub fn transfer_subscriptions(&self, _session: Arc>, request: &TransferSubscriptionsRequest) -> SupportedMessage { + pub fn transfer_subscriptions( + &self, + _session: Arc>, + request: &TransferSubscriptionsRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.subscription_ids) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -140,12 +176,13 @@ impl SubscriptionService { let results = { // TODO this is a stub. The real thing should look up subscriptions belonging to // other sessions and transfer them across to this one. 
- let results = subscription_ids.iter().map(|_subscription_id| { - TransferResult { + let results = subscription_ids + .iter() + .map(|_subscription_id| TransferResult { status_code: StatusCode::BadSubscriptionIdInvalid, available_sequence_numbers: None, - } - }).collect::>(); + }) + .collect::>(); Some(results) }; let diagnostic_infos = None; @@ -153,12 +190,17 @@ impl SubscriptionService { response_header: ResponseHeader::new_good(&request.request_header), results, diagnostic_infos, - }.into() + } + .into() } } /// Handles a DeleteSubscriptionsRequest - pub fn delete_subscriptions(&self, session: Arc>, request: &DeleteSubscriptionsRequest) -> SupportedMessage { + pub fn delete_subscriptions( + &self, + session: Arc>, + request: &DeleteSubscriptionsRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.subscription_ids) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -167,14 +209,17 @@ impl SubscriptionService { let results = { let subscriptions = session.subscriptions_mut(); // Attempt to remove each subscription - let results = subscription_ids.iter().map(|subscription_id| { - let subscription = subscriptions.remove(*subscription_id); - if subscription.is_some() { - StatusCode::Good - } else { - StatusCode::BadSubscriptionIdInvalid - } - }).collect::>(); + let results = subscription_ids + .iter() + .map(|subscription_id| { + let subscription = subscriptions.remove(*subscription_id); + if subscription.is_some() { + StatusCode::Good + } else { + StatusCode::BadSubscriptionIdInvalid + } + }) + .collect::>(); Some(results) }; let diagnostic_infos = None; @@ -182,12 +227,20 @@ impl SubscriptionService { response_header: ResponseHeader::new_good(&request.request_header), results, diagnostic_infos, - }.into() + } + .into() } } /// Handles a PublishRequest. This is asynchronous, so the response will be sent later on. 
-    pub fn async_publish(&self, now: &DateTimeUtc, session: Arc<RwLock<Session>>, address_space: Arc<RwLock<AddressSpace>>, request_id: u32, request: &PublishRequest) -> Option<SupportedMessage> {
+    pub fn async_publish(
+        &self,
+        now: &DateTimeUtc,
+        session: Arc<RwLock<Session>>,
+        address_space: Arc<RwLock<AddressSpace>>,
+        request_id: u32,
+        request: &PublishRequest,
+    ) -> Option<SupportedMessage> {
         trace!("--> Receive a PublishRequest {:?}", request);
         let mut session = trace_write_lock_unwrap!(session);
         if session.subscriptions().is_empty() {
@@ -195,7 +248,8 @@
         } else {
             let address_space = trace_read_lock_unwrap!(address_space);
             let request_header = request.request_header.clone();
-            let result = session.enqueue_publish_request(now, request_id, request.clone(), &address_space);
+            let result =
+                session.enqueue_publish_request(now, request_id, request.clone(), &address_space);
             if let Err(error) = result {
                 Some(self.service_fault(&request_header, error))
             } else {
@@ -205,11 +259,17 @@
     }
     /// Handles a RepublishRequest
-    pub fn republish(&self, session: Arc<RwLock<Session>>, request: &RepublishRequest) -> SupportedMessage {
+    pub fn republish(
+        &self,
+        session: Arc<RwLock<Session>>,
+        request: &RepublishRequest,
+    ) -> SupportedMessage {
         trace!("Republish {:?}", request);
         // Look for a matching notification message
         let mut session = trace_write_lock_unwrap!(session);
-        let result = session.subscriptions().find_notification_message(request.subscription_id, request.retransmit_sequence_number);
+        let result = session
+            .subscriptions()
+            .find_notification_message(request.subscription_id, request.retransmit_sequence_number);
         if let Ok(notification_message) = result {
             session.reset_subscription_lifetime_counter(request.subscription_id);
             let response = RepublishResponse {
@@ -224,15 +284,24 @@
     /// This function takes the requested values passed in a create / modify and returns revised
     /// values that conform to the server's limits.
For simplicity the return type is a tuple - fn revise_subscription_values(server_state: &ServerState, requested_publishing_interval: Duration, requested_max_keep_alive_count: u32, requested_lifetime_count: u32) -> (Duration, u32, u32) { - let revised_publishing_interval = f64::max(requested_publishing_interval, server_state.min_publishing_interval_ms); - let revised_max_keep_alive_count = if requested_max_keep_alive_count > server_state.max_keep_alive_count { - server_state.max_keep_alive_count - } else if requested_max_keep_alive_count == 0 { - server_state.default_keep_alive_count - } else { - requested_max_keep_alive_count - }; + fn revise_subscription_values( + server_state: &ServerState, + requested_publishing_interval: Duration, + requested_max_keep_alive_count: u32, + requested_lifetime_count: u32, + ) -> (Duration, u32, u32) { + let revised_publishing_interval = f64::max( + requested_publishing_interval, + server_state.min_publishing_interval_ms, + ); + let revised_max_keep_alive_count = + if requested_max_keep_alive_count > server_state.max_keep_alive_count { + server_state.max_keep_alive_count + } else if requested_max_keep_alive_count == 0 { + server_state.default_keep_alive_count + } else { + requested_max_keep_alive_count + }; // Lifetime count must exceed keep alive count by at least a multiple of let min_lifetime_count = revised_max_keep_alive_count * 3; let revised_lifetime_count = if requested_lifetime_count < min_lifetime_count { @@ -242,6 +311,10 @@ impl SubscriptionService { } else { requested_lifetime_count }; - (revised_publishing_interval, revised_max_keep_alive_count, revised_lifetime_count) + ( + revised_publishing_interval, + revised_max_keep_alive_count, + revised_lifetime_count, + ) } -} \ No newline at end of file +} diff --git a/server/src/services/view.rs b/server/src/services/view.rs index 55cb97d5d..2ca6cc80e 100644 --- a/server/src/services/view.rs +++ b/server/src/services/view.rs @@ -7,14 +7,10 @@ use std::sync::{Arc, Mutex, RwLock}; use opcua_core::supported_message::SupportedMessage; use opcua_crypto::random; -use opcua_types::{ - *, - node_ids::ReferenceTypeId, - status_code::StatusCode, -}; +use opcua_types::{node_ids::ReferenceTypeId, status_code::StatusCode, *}; use crate::{ - address_space::{AddressSpace, relative_path}, + address_space::{relative_path, AddressSpace}, continuation_point::BrowseContinuationPoint, services::Service, session::Session, @@ -25,7 +21,9 @@ use crate::{ pub(crate) struct ViewService; impl Service for ViewService { - fn name(&self) -> String { String::from("ViewService") } + fn name(&self) -> String { + String::from("ViewService") + } } impl ViewService { @@ -33,7 +31,13 @@ impl ViewService { ViewService {} } - pub fn browse(&self, server_state: Arc>, session: Arc>, address_space: Arc>, request: &BrowseRequest) -> SupportedMessage { + pub fn browse( + &self, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: &BrowseRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.nodes_to_browse) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -53,32 +57,49 @@ impl ViewService { // Max references per node. This should be server configurable but the constant // is generous. 
TODO this value needs to adapt for the max message size const DEFAULT_MAX_REFERENCES_PER_NODE: u32 = 255; - let max_references_per_node = if request.requested_max_references_per_node == 0 { + let max_references_per_node = if request.requested_max_references_per_node == 0 + { // Client imposes no limit DEFAULT_MAX_REFERENCES_PER_NODE - } else if request.requested_max_references_per_node > DEFAULT_MAX_REFERENCES_PER_NODE { + } else if request.requested_max_references_per_node + > DEFAULT_MAX_REFERENCES_PER_NODE + { // Client limit exceeds default DEFAULT_MAX_REFERENCES_PER_NODE } else { request.requested_max_references_per_node }; // Browse the nodes - let results = Some(Self::browse_nodes(&mut session, &address_space, nodes_to_browse, max_references_per_node as usize)); + let results = Some(Self::browse_nodes( + &mut session, + &address_space, + nodes_to_browse, + max_references_per_node as usize, + )); let diagnostic_infos = None; BrowseResponse { response_header: ResponseHeader::new_good(&request.request_header), results, diagnostic_infos, - }.into() + } + .into() } else { - error!("Browse request too many nodes to browse {}", nodes_to_browse.len()); + error!( + "Browse request too many nodes to browse {}", + nodes_to_browse.len() + ); self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } } } } - pub fn browse_next(&self, session: Arc>, address_space: Arc>, request: &BrowseNextRequest) -> SupportedMessage { + pub fn browse_next( + &self, + session: Arc>, + address_space: Arc>, + request: &BrowseNextRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.continuation_points) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { @@ -92,9 +113,16 @@ impl ViewService { } else { // Iterate from the continuation point, assuming it is valid session.remove_expired_browse_continuation_points(&address_space); - let results = continuation_points.iter().map(|continuation_point| { - Self::browse_from_continuation_point(&mut session, &address_space, continuation_point) - }).collect(); + let results = continuation_points + .iter() + .map(|continuation_point| { + Self::browse_from_continuation_point( + &mut session, + &address_space, + continuation_point, + ) + }) + .collect(); Some(results) }; @@ -103,89 +131,117 @@ impl ViewService { response_header: ResponseHeader::new_good(&request.request_header), results, diagnostic_infos, - }.into() + } + .into() } } - pub fn translate_browse_paths_to_node_ids(&self, server_state: Arc>, address_space: Arc>, request: &TranslateBrowsePathsToNodeIdsRequest) -> SupportedMessage { + pub fn translate_browse_paths_to_node_ids( + &self, + server_state: Arc>, + address_space: Arc>, + request: &TranslateBrowsePathsToNodeIdsRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.browse_paths) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { let server_state = trace_read_lock_unwrap!(server_state); let address_space = trace_read_lock_unwrap!(address_space); let browse_paths = request.browse_paths.as_ref().unwrap(); - let max_browse_paths_per_translate = server_state.operational_limits.max_nodes_per_translate_browse_paths_to_node_ids; + let max_browse_paths_per_translate = server_state + .operational_limits + .max_nodes_per_translate_browse_paths_to_node_ids; if browse_paths.len() <= max_browse_paths_per_translate { - let results = browse_paths.iter().enumerate().map(|(i, browse_path)| { - trace!("Processing browse path {}", i); - let node_id = 
browse_path.starting_node.clone(); - if browse_path.relative_path.elements.is_none() { - BrowsePathResult { - status_code: StatusCode::BadNothingToDo, - targets: None, - } - } else { - // Starting from the node_id, find paths - match relative_path::find_nodes_relative_path(&address_space, &node_id, &browse_path.relative_path) { - Err(err) => { - trace!("Browse path result for find nodes returned in error {}", err.name()); - BrowsePathResult { - status_code: err, - targets: None, - - } + let results = browse_paths + .iter() + .enumerate() + .map(|(i, browse_path)| { + trace!("Processing browse path {}", i); + let node_id = browse_path.starting_node.clone(); + if browse_path.relative_path.elements.is_none() { + BrowsePathResult { + status_code: StatusCode::BadNothingToDo, + targets: None, } - Ok(result) => { - let targets = if !result.is_empty() { - use std::u32; - let targets = result.iter().map(|node_id| { - BrowsePathTarget { - target_id: ExpandedNodeId::new(node_id.clone()), - remaining_path_index: u32::MAX, - } - }).collect(); - Some(targets) - } else { - None - }; - BrowsePathResult { - status_code: StatusCode::Good, - targets, + } else { + // Starting from the node_id, find paths + match relative_path::find_nodes_relative_path( + &address_space, + &node_id, + &browse_path.relative_path, + ) { + Err(err) => { + trace!( + "Browse path result for find nodes returned in error {}", + err.name() + ); + BrowsePathResult { + status_code: err, + targets: None, + } + } + Ok(result) => { + let targets = if !result.is_empty() { + use std::u32; + let targets = result + .iter() + .map(|node_id| BrowsePathTarget { + target_id: ExpandedNodeId::new(node_id.clone()), + remaining_path_index: u32::MAX, + }) + .collect(); + Some(targets) + } else { + None + }; + BrowsePathResult { + status_code: StatusCode::Good, + targets, + } } } } - } - }).collect(); + }) + .collect(); TranslateBrowsePathsToNodeIdsResponse { response_header: ResponseHeader::new_good(&request.request_header), results: Some(results), diagnostic_infos: None, - }.into() + } + .into() } else { - error!("Browse paths size {} exceeds max nodes {}", browse_paths.len(), max_browse_paths_per_translate); + error!( + "Browse paths size {} exceeds max nodes {}", + browse_paths.len(), + max_browse_paths_per_translate + ); self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } } } - pub fn register_nodes(&self, server_state: Arc>, session: Arc>, request: &RegisterNodesRequest) -> SupportedMessage { + pub fn register_nodes( + &self, + server_state: Arc>, + session: Arc>, + request: &RegisterNodesRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.nodes_to_register) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { let mut server_state = trace_write_lock_unwrap!(server_state); let nodes_to_register = request.nodes_to_register.as_ref().unwrap(); - if nodes_to_register.len() <= server_state.operational_limits.max_nodes_per_register_nodes { + if nodes_to_register.len() + <= server_state.operational_limits.max_nodes_per_register_nodes + { if let Some(ref mut callback) = server_state.register_nodes_callback { match callback.register_nodes(session, &nodes_to_register[..]) { - Ok(registered_node_ids) => { - RegisterNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - registered_node_ids: Some(registered_node_ids), - }.into() - } - Err(err) => { - self.service_fault(&request.request_header, err) + Ok(registered_node_ids) => RegisterNodesResponse { + 
response_header: ResponseHeader::new_good(&request.request_header), + registered_node_ids: Some(registered_node_ids), } + .into(), + Err(err) => self.service_fault(&request.request_header, err), } } else { // There is no callback for registering nodes, so just pretend they're registered. @@ -193,60 +249,92 @@ impl ViewService { RegisterNodesResponse { response_header: ResponseHeader::new_good(&request.request_header), registered_node_ids: Some(registered_node_ids), - }.into() + } + .into() } } else { - error!("Register nodes too many operations {}", nodes_to_register.len()); + error!( + "Register nodes too many operations {}", + nodes_to_register.len() + ); self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } } } - pub fn unregister_nodes(&self, server_state: Arc>, session: Arc>, request: &UnregisterNodesRequest) -> SupportedMessage { + pub fn unregister_nodes( + &self, + server_state: Arc>, + session: Arc>, + request: &UnregisterNodesRequest, + ) -> SupportedMessage { if is_empty_option_vec!(request.nodes_to_unregister) { self.service_fault(&request.request_header, StatusCode::BadNothingToDo) } else { let mut server_state = trace_write_lock_unwrap!(server_state); let nodes_to_unregister = request.nodes_to_unregister.as_ref().unwrap(); - if nodes_to_unregister.len() <= server_state.operational_limits.max_nodes_per_register_nodes { + if nodes_to_unregister.len() + <= server_state.operational_limits.max_nodes_per_register_nodes + { if let Some(ref mut callback) = server_state.unregister_nodes_callback { match callback.unregister_nodes(session, &nodes_to_unregister[..]) { - Ok(_) => { - UnregisterNodesResponse { - response_header: ResponseHeader::new_good(&request.request_header), - }.into() - } - Err(err) => { - self.service_fault(&request.request_header, err) + Ok(_) => UnregisterNodesResponse { + response_header: ResponseHeader::new_good(&request.request_header), } + .into(), + Err(err) => self.service_fault(&request.request_header, err), } } else { // There is no callback so just succeed UnregisterNodesResponse { response_header: ResponseHeader::new_good(&request.request_header), - }.into() + } + .into() } } else { - error!("Unregister nodes too many operations {}", nodes_to_unregister.len()); + error!( + "Unregister nodes too many operations {}", + nodes_to_unregister.len() + ); self.service_fault(&request.request_header, StatusCode::BadTooManyOperations) } } } - fn browse_nodes(session: &mut Session, address_space: &AddressSpace, nodes_to_browse: &[BrowseDescription], max_references_per_node: usize) -> Vec { - nodes_to_browse.iter().map(|node_to_browse| { - match Self::browse_node(session, address_space, 0, node_to_browse, max_references_per_node) { - Ok(browse_result) => browse_result, - Err(status_code) => BrowseResult { - status_code, - continuation_point: ByteString::null(), - references: None, + fn browse_nodes( + session: &mut Session, + address_space: &AddressSpace, + nodes_to_browse: &[BrowseDescription], + max_references_per_node: usize, + ) -> Vec { + nodes_to_browse + .iter() + .map(|node_to_browse| { + match Self::browse_node( + session, + address_space, + 0, + node_to_browse, + max_references_per_node, + ) { + Ok(browse_result) => browse_result, + Err(status_code) => BrowseResult { + status_code, + continuation_point: ByteString::null(), + references: None, + }, } - } - }).collect() + }) + .collect() } - fn browse_node(session: &mut Session, address_space: &AddressSpace, starting_index: usize, node_to_browse: &BrowseDescription, 
max_references_per_node: usize) -> Result { + fn browse_node( + session: &mut Session, + address_space: &AddressSpace, + starting_index: usize, + node_to_browse: &BrowseDescription, + max_references_per_node: usize, + ) -> Result { // Node must exist or there will be no references if node_to_browse.node_id.is_null() || !address_space.node_exists(&node_to_browse.node_id) { return Err(StatusCode::BadNodeIdUnknown); @@ -267,13 +355,19 @@ impl ViewService { // Fetch the references to / from the given node to browse - let (references, inverse_ref_idx) = address_space.find_references_by_direction(&node_to_browse.node_id, node_to_browse.browse_direction, reference_type_id); + let (references, inverse_ref_idx) = address_space.find_references_by_direction( + &node_to_browse.node_id, + node_to_browse.browse_direction, + reference_type_id, + ); - let result_mask = BrowseDescriptionResultMask::from_bits_truncate(node_to_browse.result_mask); + let result_mask = + BrowseDescriptionResultMask::from_bits_truncate(node_to_browse.result_mask); let node_class_mask = NodeClassMask::from_bits_truncate(node_to_browse.node_class_mask); // Construct descriptions for each reference - let mut reference_descriptions: Vec = Vec::with_capacity(max_references_per_node); + let mut reference_descriptions: Vec = + Vec::with_capacity(max_references_per_node); for (idx, reference) in references.iter().enumerate() { if idx < starting_index { continue; @@ -299,52 +393,59 @@ impl ViewService { } // Prepare the values to put into the struct according to the result mask - let reference_type_id = if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_REFERENCE_TYPE) { - reference.reference_type.clone() - } else { - NodeId::null() - }; - let is_forward = if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_IS_FORWARD) { - idx < inverse_ref_idx - } else { - true - }; + let reference_type_id = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_REFERENCE_TYPE) { + reference.reference_type.clone() + } else { + NodeId::null() + }; + let is_forward = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_IS_FORWARD) { + idx < inverse_ref_idx + } else { + true + }; - let target_node_class = if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_NODE_CLASS) { - target_node_class - } else { - NodeClass::Unspecified - }; - let browse_name = if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_BROWSE_NAME) { - target_node.browse_name().clone() - } else { - QualifiedName::null() - }; - let display_name = if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_DISPLAY_NAME) { - target_node.display_name().clone() - } else { - LocalizedText::null() - }; - let type_definition = if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_TYPE_DEFINITION) { - // Type definition NodeId of the TargetNode. Type definitions are only available - // for the NodeClasses Object and Variable. For all other NodeClasses a null NodeId - // shall be returned. 
- match target_node_class { - NodeClass::Object | NodeClass::Variable => { - let type_defs = address_space.find_references(&target_node.node_id(), Some((ReferenceTypeId::HasTypeDefinition, false))); - if let Some(type_defs) = type_defs { - ExpandedNodeId::new(type_defs[0].target_node.clone()) - } else { - ExpandedNodeId::null() + let target_node_class = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_NODE_CLASS) { + target_node_class + } else { + NodeClass::Unspecified + }; + let browse_name = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_BROWSE_NAME) { + target_node.browse_name().clone() + } else { + QualifiedName::null() + }; + let display_name = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_DISPLAY_NAME) { + target_node.display_name().clone() + } else { + LocalizedText::null() + }; + let type_definition = + if result_mask.contains(BrowseDescriptionResultMask::RESULT_MASK_TYPE_DEFINITION) { + // Type definition NodeId of the TargetNode. Type definitions are only available + // for the NodeClasses Object and Variable. For all other NodeClasses a null NodeId + // shall be returned. + match target_node_class { + NodeClass::Object | NodeClass::Variable => { + let type_defs = address_space.find_references( + &target_node.node_id(), + Some((ReferenceTypeId::HasTypeDefinition, false)), + ); + if let Some(type_defs) = type_defs { + ExpandedNodeId::new(type_defs[0].target_node.clone()) + } else { + ExpandedNodeId::null() + } } + _ => ExpandedNodeId::null(), } - _ => { - ExpandedNodeId::null() - } - } - } else { - ExpandedNodeId::null() - }; + } else { + ExpandedNodeId::null() + }; let reference_description = ReferenceDescription { node_id: ExpandedNodeId::new(target_node_id), @@ -358,19 +459,42 @@ impl ViewService { reference_descriptions.push(reference_description); } - Ok(Self::reference_description_to_browse_result(session, address_space, &reference_descriptions, 0, max_references_per_node)) + Ok(Self::reference_description_to_browse_result( + session, + address_space, + &reference_descriptions, + 0, + max_references_per_node, + )) } - fn browse_from_continuation_point(session: &mut Session, address_space: &AddressSpace, continuation_point: &ByteString) -> BrowseResult { + fn browse_from_continuation_point( + session: &mut Session, + address_space: &AddressSpace, + continuation_point: &ByteString, + ) -> BrowseResult { // Find the continuation point in the session - if let Some(continuation_point) = session.find_browse_continuation_point(continuation_point) { - debug!("Browsing from continuation point {}", continuation_point.id.as_base64()); + if let Some(continuation_point) = session.find_browse_continuation_point(continuation_point) + { + debug!( + "Browsing from continuation point {}", + continuation_point.id.as_base64() + ); let reference_descriptions = continuation_point.reference_descriptions.lock().unwrap(); // Use the existing result. 
This may result in another continuation point being created - Self::reference_description_to_browse_result(session, address_space, &reference_descriptions, continuation_point.starting_index, continuation_point.max_references_per_node) + Self::reference_description_to_browse_result( + session, + address_space, + &reference_descriptions, + continuation_point.starting_index, + continuation_point.max_references_per_node, + ) } else { // Not valid or missing - error!("Continuation point {} was invalid", continuation_point.as_base64()); + error!( + "Continuation point {} was invalid", + continuation_point.as_base64() + ); BrowseResult { status_code: StatusCode::BadContinuationPointInvalid, continuation_point: ByteString::null(), @@ -379,12 +503,21 @@ impl ViewService { } } - fn reference_description_to_browse_result(session: &mut Session, address_space: &AddressSpace, reference_descriptions: &[ReferenceDescription], starting_index: usize, max_references_per_node: usize) -> BrowseResult { + fn reference_description_to_browse_result( + session: &mut Session, + address_space: &AddressSpace, + reference_descriptions: &[ReferenceDescription], + starting_index: usize, + max_references_per_node: usize, + ) -> BrowseResult { let references_remaining = reference_descriptions.len() - starting_index; - let (reference_descriptions, continuation_point) = if max_references_per_node > 0 && references_remaining > max_references_per_node { + let (reference_descriptions, continuation_point) = if max_references_per_node > 0 + && references_remaining > max_references_per_node + { // There is too many results for a single browse result, so only a result will be used let next_starting_index = starting_index + max_references_per_node; - let reference_descriptions_slice = reference_descriptions[starting_index..next_starting_index].to_vec(); + let reference_descriptions_slice = + reference_descriptions[starting_index..next_starting_index].to_vec(); // TODO it is wasteful to create a new reference_descriptions vec if the caller to this fn // already has a ref counted reference_descriptions. 
We could clone the Arc if the fn could @@ -407,7 +540,11 @@ impl ViewService { } else { // Returns the remainder of the results let reference_descriptions_slice = reference_descriptions[starting_index..].to_vec(); - debug!("Returning references {}..{}, with no further continuation point", starting_index, reference_descriptions.len()); + debug!( + "Returning references {}..{}, with no further continuation point", + starting_index, + reference_descriptions.len() + ); (reference_descriptions_slice, ByteString::null()) }; BrowseResult { @@ -416,4 +553,4 @@ impl ViewService { references: Some(reference_descriptions), } } -} \ No newline at end of file +} diff --git a/server/src/session.rs b/server/src/session.rs index fd6526bd6..96906ad5e 100644 --- a/server/src/session.rs +++ b/server/src/session.rs @@ -4,8 +4,8 @@ use std::{ collections::{HashSet, VecDeque}, sync::{ - Arc, atomic::{AtomicI32, Ordering}, - RwLock, + atomic::{AtomicI32, Ordering}, + Arc, RwLock, }, }; @@ -13,9 +13,7 @@ use chrono::{self, Utc}; use opcua_core::comms::secure_channel::{Role, SecureChannel}; use opcua_crypto::X509; -use opcua_types::{ - *, service_types::PublishRequest, status_code::StatusCode, -}; +use opcua_types::{service_types::PublishRequest, status_code::StatusCode, *}; use crate::{ address_space::{AddressSpace, UserAccessLevel}, @@ -53,7 +51,6 @@ pub enum ServerUserIdentityToken { Invalid(ExtensionObject), } - /// The Session is any state maintained between the client and server pub struct Session { /// Subscriptions associated with the session @@ -162,7 +159,10 @@ impl Session { let diagnostics = server_state.diagnostics.clone(); let (decoding_limits, can_modify_address_space) = { let config = trace_read_lock_unwrap!(server_state.config); - (config.decoding_limits(), config.limits.clients_can_modify_address_space) + ( + config.decoding_limits(), + config.limits.clients_can_modify_address_space, + ) }; let session = Session { @@ -175,7 +175,11 @@ impl Session { client_certificate: None, security_policy_uri: String::new(), authentication_token: NodeId::null(), - secure_channel: Arc::new(RwLock::new(SecureChannel::new(server.certificate_store(), Role::Server, decoding_limits))), + secure_channel: Arc::new(RwLock::new(SecureChannel::new( + server.certificate_store(), + Role::Server, + decoding_limits, + ))), session_nonce: ByteString::null(), session_name: UAString::null(), session_timeout: 0f64, @@ -197,19 +201,29 @@ impl Session { } session } - pub fn secure_channel(&self) -> Arc> { self.secure_channel.clone() } + pub fn secure_channel(&self) -> Arc> { + self.secure_channel.clone() + } - pub fn session_id(&self) -> &NodeId { &self.session_id } + pub fn session_id(&self) -> &NodeId { + &self.session_id + } pub fn set_activated(&mut self, activated: bool) { self.activated = activated; } - pub fn is_activated(&self) -> bool { self.activated } + pub fn is_activated(&self) -> bool { + self.activated + } - pub fn is_terminated(&self) -> bool { self.terminated } + pub fn is_terminated(&self) -> bool { + self.terminated + } - pub fn terminated_at(&self) -> DateTimeUtc { self.terminated_at.clone() } + pub fn terminated_at(&self) -> DateTimeUtc { + self.terminated_at.clone() + } pub fn set_terminated(&mut self) { info!("Session being set to terminated"); @@ -261,7 +275,10 @@ impl Session { self.last_service_request_timestamp.clone() } - pub fn set_last_service_request_timestamp(&mut self, last_service_request_timestamp: DateTimeUtc) { + pub fn set_last_service_request_timestamp( + &mut self, + 
last_service_request_timestamp: DateTimeUtc, + ) { self.last_service_request_timestamp = last_service_request_timestamp; } @@ -289,7 +306,9 @@ impl Session { self.session_nonce = session_nonce; } - pub fn session_name(&self) -> &UAString { &self.session_name } + pub fn session_name(&self) -> &UAString { + &self.session_name + } pub fn set_session_name(&mut self, session_name: UAString) { self.session_name = session_name; @@ -307,11 +326,23 @@ impl Session { &mut self.subscriptions } - pub(crate) fn enqueue_publish_request(&mut self, now: &DateTimeUtc, request_id: u32, request: PublishRequest, address_space: &AddressSpace) -> Result<(), StatusCode> { - self.subscriptions.enqueue_publish_request(now, request_id, request, address_space) + pub(crate) fn enqueue_publish_request( + &mut self, + now: &DateTimeUtc, + request_id: u32, + request: PublishRequest, + address_space: &AddressSpace, + ) -> Result<(), StatusCode> { + self.subscriptions + .enqueue_publish_request(now, request_id, request, address_space) } - pub(crate) fn tick_subscriptions(&mut self, now: &DateTimeUtc, address_space: &AddressSpace, reason: TickReason) -> Result<(), StatusCode> { + pub(crate) fn tick_subscriptions( + &mut self, + now: &DateTimeUtc, + address_space: &AddressSpace, + reason: TickReason, + ) -> Result<(), StatusCode> { self.subscriptions.tick(now, address_space, reason) } @@ -329,25 +360,42 @@ impl Session { self.subscriptions.expire_stale_publish_requests(now); } - pub(crate) fn add_browse_continuation_point(&mut self, continuation_point: BrowseContinuationPoint) { + pub(crate) fn add_browse_continuation_point( + &mut self, + continuation_point: BrowseContinuationPoint, + ) { // Remove excess browse continuation points while self.browse_continuation_points.len() >= self.max_browse_continuation_points { let continuation_point = self.browse_continuation_points.pop_front(); - debug!("Removing old continuation point {} to make way for new one", continuation_point.unwrap().id.as_base64()); + debug!( + "Removing old continuation point {} to make way for new one", + continuation_point.unwrap().id.as_base64() + ); } - self.browse_continuation_points.push_back(continuation_point); + self.browse_continuation_points + .push_back(continuation_point); } /// Finds and REMOVES a continuation point by id. 
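The continuation-point bookkeeping here (add_browse_continuation_point above, find_browse_continuation_point just below) amounts to a capped deque plus a take-by-id lookup: adding a point evicts the oldest entries once max_browse_continuation_points is reached, and finding a point removes it so it can only be consumed once. A minimal sketch with plain std types (ContinuationPoint and ContinuationPoints are illustrative stand-ins, not the crate's BrowseContinuationPoint API):

use std::collections::VecDeque;

// Illustrative stand-in for BrowseContinuationPoint; only the id matters for the bookkeeping.
struct ContinuationPoint {
    id: String,
}

struct ContinuationPoints {
    points: VecDeque<ContinuationPoint>,
    max_points: usize,
}

impl ContinuationPoints {
    // Mirrors add_browse_continuation_point: evict the oldest entries once the cap is reached.
    fn add(&mut self, point: ContinuationPoint) {
        while self.points.len() >= self.max_points {
            self.points.pop_front();
        }
        self.points.push_back(point);
    }

    // Mirrors find_browse_continuation_point: the lookup removes the entry,
    // so each continuation point can be consumed at most once.
    fn take(&mut self, id: &str) -> Option<ContinuationPoint> {
        match self.points.iter().position(|p| p.id == id) {
            Some(idx) => self.points.remove(idx),
            None => None,
        }
    }
}

fn main() {
    let mut cps = ContinuationPoints { points: VecDeque::new(), max_points: 2 };
    cps.add(ContinuationPoint { id: "a".into() });
    cps.add(ContinuationPoint { id: "b".into() });
    cps.add(ContinuationPoint { id: "c".into() }); // evicts "a"
    assert!(cps.take("a").is_none());
    assert!(cps.take("b").is_some());
    assert!(cps.take("b").is_none()); // already consumed
}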
- pub(crate) fn find_browse_continuation_point(&mut self, id: &ByteString) -> Option { - if let Some(idx) = self.browse_continuation_points.iter().position(|continuation_point| continuation_point.id == *id) { + pub(crate) fn find_browse_continuation_point( + &mut self, + id: &ByteString, + ) -> Option { + if let Some(idx) = self + .browse_continuation_points + .iter() + .position(|continuation_point| continuation_point.id == *id) + { self.browse_continuation_points.remove(idx) } else { None } } - pub(crate) fn remove_expired_browse_continuation_points(&mut self, address_space: &AddressSpace) { + pub(crate) fn remove_expired_browse_continuation_points( + &mut self, + address_space: &AddressSpace, + ) { self.browse_continuation_points.retain(|continuation_point| { let valid = continuation_point.is_valid_browse_continuation_point(address_space); if !valid { @@ -360,11 +408,11 @@ impl Session { /// Remove all the specified continuation points by id pub(crate) fn remove_browse_continuation_points(&mut self, continuation_points: &[ByteString]) { // Turn the supplied slice into a set - let continuation_points_set: HashSet = continuation_points.iter().cloned().collect(); + let continuation_points_set: HashSet = + continuation_points.iter().cloned().collect(); // Now remove any continuation points that are part of that set - self.browse_continuation_points.retain(|continuation_point| { - !continuation_points_set.contains(&continuation_point.id) - }); + self.browse_continuation_points + .retain(|continuation_point| !continuation_points_set.contains(&continuation_point.id)); } pub(crate) fn can_modify_address_space(&self) -> bool { @@ -376,7 +424,12 @@ impl Session { self.can_modify_address_space = can_modify_address_space; } - pub(crate) fn effective_user_access_level(&self, user_access_level: UserAccessLevel, _node_id: &NodeId, _attribute_id: AttributeId) -> UserAccessLevel { + pub(crate) fn effective_user_access_level( + &self, + user_access_level: UserAccessLevel, + _node_id: &NodeId, + _attribute_id: AttributeId, + ) -> UserAccessLevel { // TODO session could modify the user_access_level further here via user / groups user_access_level } @@ -387,9 +440,7 @@ impl Session { pub fn client_user_id(&self) -> UAString { match self.user_identity { IdentityToken::None | IdentityToken::AnonymousIdentityToken(_) => UAString::null(), - IdentityToken::UserNameIdentityToken(ref token) => { - token.user_name.clone() - } + IdentityToken::UserNameIdentityToken(ref token) => token.user_name.clone(), IdentityToken::X509IdentityToken(ref token) => { if let Ok(cert) = X509::from_byte_string(&token.certificate_data) { UAString::from(cert.subject_name()) @@ -397,9 +448,7 @@ impl Session { UAString::from("Invalid certificate") } } - IdentityToken::Invalid(_) => { - UAString::from("invalid") - } + IdentityToken::Invalid(_) => UAString::from("invalid"), } } diff --git a/server/src/session_diagnostics.rs b/server/src/session_diagnostics.rs index 880d3eedd..66fb7d4e4 100644 --- a/server/src/session_diagnostics.rs +++ b/server/src/session_diagnostics.rs @@ -1,15 +1,9 @@ use std::collections::HashMap; -use opcua_types::{ - node_ids::ObjectTypeId, - service_types::ServiceCounterDataType, -}; +use opcua_types::{node_ids::ObjectTypeId, service_types::ServiceCounterDataType}; use crate::{ - address_space::{ - address_space::AddressSpace, - object::ObjectBuilder, - }, + address_space::{address_space::AddressSpace, object::ObjectBuilder}, session::Session, }; @@ -40,59 +34,63 @@ impl SessionDiagnostics { debug!("register_session 
for session id {}", session_id); debug!("Adding an object node for the session id {}", session_id); - let builder = ObjectBuilder::new(session_id, format!("{}", session_id), format!("{}", session_id)) - .has_type_definition(ObjectTypeId::SessionDiagnosticsObjectType) - .insert(address_space); + let builder = ObjectBuilder::new( + session_id, + format!("{}", session_id), + format!("{}", session_id), + ) + .has_type_definition(ObjectTypeId::SessionDiagnosticsObjectType) + .insert(address_space); // Now add variables /* - 12816 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics), - 12817 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SessionId), - 12818 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SessionName), - 12819 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientDescription), - 12820 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ServerUri), - 12821 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_EndpointUrl), - 12822 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_LocaleIds), - 12823 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ActualSessionTimeout), - 12824 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_MaxResponseMessageSize), - 12825 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientConnectionTime), - 12826 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientLastContactTime), - 12827 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentSubscriptionsCount), - 12828 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentMonitoredItemsCount), - 12829 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentPublishRequestsInQueue), - - 12830 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TotalRequestCount), - 12831 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_UnauthorizedRequestCount), - - 12832 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ReadCount), - 12833 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_HistoryReadCount), - 12834 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_WriteCount), - 12835 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_HistoryUpdateCount), - 12836 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CallCount), - 12837 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CreateMonitoredItemsCount), - 12838 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ModifyMonitoredItemsCount), - 12839 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetMonitoringModeCount), - 12840 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetTriggeringCount), - 12841 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteMonitoredItemsCount), - 12842 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CreateSubscriptionCount), - 12843 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ModifySubscriptionCount), - 12844 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetPublishingModeCount), - 12845 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_PublishCount), - 12846 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_RepublishCount), - 12847 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TransferSubscriptionsCount), - 12848 => 
Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteSubscriptionsCount), - 12849 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_AddNodesCount), - 12850 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_AddReferencesCount), - 12851 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteNodesCount), - 12852 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteReferencesCount), - 12853 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_BrowseCount), - 12854 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_BrowseNextCount), - 12855 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TranslateBrowsePathsToNodeIdsCount), - 12856 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_QueryFirstCount), - 12857 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_QueryNextCount), - 12858 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_RegisterNodesCount), - 12859 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_UnregisterNodesCount), - */ + 12816 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics), + 12817 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SessionId), + 12818 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SessionName), + 12819 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientDescription), + 12820 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ServerUri), + 12821 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_EndpointUrl), + 12822 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_LocaleIds), + 12823 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ActualSessionTimeout), + 12824 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_MaxResponseMessageSize), + 12825 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientConnectionTime), + 12826 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ClientLastContactTime), + 12827 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentSubscriptionsCount), + 12828 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentMonitoredItemsCount), + 12829 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CurrentPublishRequestsInQueue), + + 12830 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TotalRequestCount), + 12831 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_UnauthorizedRequestCount), + + 12832 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ReadCount), + 12833 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_HistoryReadCount), + 12834 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_WriteCount), + 12835 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_HistoryUpdateCount), + 12836 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CallCount), + 12837 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CreateMonitoredItemsCount), + 12838 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ModifyMonitoredItemsCount), + 12839 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetMonitoringModeCount), + 12840 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetTriggeringCount), + 12841 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteMonitoredItemsCount), + 
12842 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_CreateSubscriptionCount), + 12843 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_ModifySubscriptionCount), + 12844 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_SetPublishingModeCount), + 12845 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_PublishCount), + 12846 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_RepublishCount), + 12847 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TransferSubscriptionsCount), + 12848 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteSubscriptionsCount), + 12849 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_AddNodesCount), + 12850 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_AddReferencesCount), + 12851 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteNodesCount), + 12852 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_DeleteReferencesCount), + 12853 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_BrowseCount), + 12854 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_BrowseNextCount), + 12855 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_TranslateBrowsePathsToNodeIdsCount), + 12856 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_QueryFirstCount), + 12857 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_QueryNextCount), + 12858 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_RegisterNodesCount), + 12859 => Ok(VariableId::SessionDiagnosticsArrayType_SessionDiagnostics_UnregisterNodesCount), + */ // Browse name shall be session name // session id is the nodeid @@ -138,7 +136,10 @@ impl SessionDiagnostics { } /// Fetches a snapshot of the current service counter value - pub(crate) fn service_counter(&mut self, diagnostic_key: &'static str) -> ServiceCounterDataType { + pub(crate) fn service_counter( + &mut self, + diagnostic_key: &'static str, + ) -> ServiceCounterDataType { if let Some(counter) = self.service_counters.get_mut(diagnostic_key) { counter.clone() } else { @@ -192,7 +193,8 @@ pub(crate) const DELETE_NODES_COUNT: &'static str = "DeleteNodesCount"; pub(crate) const DELETE_REFERENCES_COUNT: &'static str = "DeleteReferencesCount"; pub(crate) const BROWSE_COUNT: &'static str = "BrowseCount"; pub(crate) const BROWSE_NEXT_COUNT: &'static str = "BrowseNextCount"; -pub(crate) const TRANSLATE_BROWSE_PATHS_TO_NODE_IDS_COUNT: &'static str = "TranslateBrowsePathsToNodeIdsCount"; +pub(crate) const TRANSLATE_BROWSE_PATHS_TO_NODE_IDS_COUNT: &'static str = + "TranslateBrowsePathsToNodeIdsCount"; //pub(crate) const QUERY_FIRST_COUNT: &'static str = "QueryFirstCount"; //pub(crate) const QUERY_NEXT_COUNT: &'static str = "QueryNextCount"; pub(crate) const REGISTER_NODES_COUNT: &'static str = "RegisterNodesCount"; diff --git a/server/src/state.rs b/server/src/state.rs index 941aa2ad5..e36a4d7b1 100644 --- a/server/src/state.rs +++ b/server/src/state.rs @@ -7,13 +7,13 @@ use std::sync::{Arc, RwLock}; use opcua_core::prelude::*; -use opcua_crypto::{PrivateKey, SecurityPolicy, user_identity, X509}; +use opcua_crypto::{user_identity, PrivateKey, SecurityPolicy, X509}; use opcua_types::{ profiles, service_types::{ - ActivateSessionRequest, AnonymousIdentityToken, ApplicationDescription, ApplicationType, EndpointDescription, - RegisteredServer, ServerState as ServerStateType, SignatureData, UserNameIdentityToken, 
UserTokenPolicy, UserTokenType, - X509IdentityToken, + ActivateSessionRequest, AnonymousIdentityToken, ApplicationDescription, ApplicationType, + EndpointDescription, RegisteredServer, ServerState as ServerStateType, SignatureData, + UserNameIdentityToken, UserTokenPolicy, UserTokenType, X509IdentityToken, }, status_code::StatusCode, }; @@ -28,7 +28,10 @@ use crate::{ event::Event, }, historical::{HistoricalDataProvider, HistoricalEventProvider}, - identity_token::{IdentityToken, POLICY_ID_ANONYMOUS, POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, POLICY_ID_USER_PASS_RSA_OAEP, POLICY_ID_X509}, + identity_token::{ + IdentityToken, POLICY_ID_ANONYMOUS, POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, + POLICY_ID_USER_PASS_RSA_OAEP, POLICY_ID_X509, + }, }; pub(crate) struct OperationalLimits { @@ -49,7 +52,8 @@ pub(crate) struct OperationalLimits { impl Default for OperationalLimits { fn default() -> Self { Self { - max_nodes_per_translate_browse_paths_to_node_ids: constants::MAX_NODES_PER_TRANSLATE_BROWSE_PATHS_TO_NODE_IDS, + max_nodes_per_translate_browse_paths_to_node_ids: + constants::MAX_NODES_PER_TRANSLATE_BROWSE_PATHS_TO_NODE_IDS, max_nodes_per_read: constants::MAX_NODES_PER_READ, max_nodes_per_write: constants::MAX_NODES_PER_WRITE, max_nodes_per_method_call: constants::MAX_NODES_PER_METHOD_CALL, @@ -124,9 +128,16 @@ pub struct ServerState { } impl ServerState { - pub fn endpoints(&self, endpoint_url: &UAString, transport_profile_uris: &Option>) -> Option> { + pub fn endpoints( + &self, + endpoint_url: &UAString, + transport_profile_uris: &Option>, + ) -> Option> { // Filter endpoints based on profile_uris - debug!("Endpoints requested, transport profile uris {:?}", transport_profile_uris); + debug!( + "Endpoints requested, transport profile uris {:?}", + transport_profile_uris + ); if let Some(ref transport_profile_uris) = *transport_profile_uris { // Note - some clients pass an empty array if !transport_profile_uris.is_empty() { @@ -135,7 +146,10 @@ impl ServerState { profile_uri.as_ref() == profiles::TRANSPORT_PROFILE_URI_BINARY }); if !found_binary_transport { - error!("Client wants to connect with a non binary transport {:#?}", transport_profile_uris); + error!( + "Client wants to connect with a non binary transport {:#?}", + transport_profile_uris + ); return None; } } @@ -146,40 +160,61 @@ impl ServerState { if !hostname.eq_ignore_ascii_case(&config.tcp_config.host) { debug!("Endpoint url \"{}\" hostname supplied by caller does not match server's hostname \"{}\"", endpoint_url, &config.tcp_config.host); } - let endpoints = config.endpoints.iter() - .map(|(_, e)| { - self.new_endpoint_description(&config, e, true) - }) + let endpoints = config + .endpoints + .iter() + .map(|(_, e)| self.new_endpoint_description(&config, e, true)) .collect(); Some(endpoints) } else { - warn!("Endpoint url \"{}\" is unrecognized, using default", endpoint_url); + warn!( + "Endpoint url \"{}\" is unrecognized, using default", + endpoint_url + ); if let Some(e) = config.default_endpoint() { Some(vec![self.new_endpoint_description(&config, e, true)]) - } - else { + } else { Some(vec![]) } } } - pub fn endpoint_exists(&self, endpoint_url: &str, security_policy: SecurityPolicy, security_mode: MessageSecurityMode) -> bool { + pub fn endpoint_exists( + &self, + endpoint_url: &str, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + ) -> bool { let config = trace_read_lock_unwrap!(self.config); - config.find_endpoint(endpoint_url, security_policy, security_mode).is_some() + 
config + .find_endpoint(endpoint_url, security_policy, security_mode) + .is_some() } /// Make matching endpoint descriptions for the specified url. /// If none match then None will be passed, therefore if Some is returned it will be guaranteed /// to contain at least one result. - pub fn new_endpoint_descriptions(&self, endpoint_url: &str) -> Option> { + pub fn new_endpoint_descriptions( + &self, + endpoint_url: &str, + ) -> Option> { debug!("find_endpoint, url = {}", endpoint_url); let config = trace_read_lock_unwrap!(self.config); let base_endpoint_url = config.base_endpoint_url(); - let endpoints: Vec = config.endpoints.iter().filter(|&(_, e)| { - // Test end point's security_policy_uri and matching url - url_matches_except_host(&e.endpoint_url(&base_endpoint_url), endpoint_url) - }).map(|(_, e)| self.new_endpoint_description(&config, e, false)).collect(); - if endpoints.is_empty() { None } else { Some(endpoints) } + let endpoints: Vec = config + .endpoints + .iter() + .filter(|&(_, e)| { + // Test end point's security_policy_uri and matching url + url_matches_except_host(&e.endpoint_url(&base_endpoint_url), endpoint_url) + }) + .map(|(_, e)| self.new_endpoint_description(&config, e, false)) + .collect(); + if endpoints.is_empty() { + None + } else { + Some(endpoints) + } } /// Determine what user/pass encryption to use depending on the security policy. @@ -187,11 +222,18 @@ impl ServerState { match endpoint.password_security_policy() { SecurityPolicy::None => POLICY_ID_USER_PASS_NONE, SecurityPolicy::Basic128Rsa15 => POLICY_ID_USER_PASS_RSA_15, - SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 => POLICY_ID_USER_PASS_RSA_OAEP, + SecurityPolicy::Basic256 | SecurityPolicy::Basic256Sha256 => { + POLICY_ID_USER_PASS_RSA_OAEP + } // TODO this is a placeholder - SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => POLICY_ID_USER_PASS_RSA_OAEP, - _ => { panic!() } - }.into() + SecurityPolicy::Aes128Sha256RsaOaep | SecurityPolicy::Aes256Sha256RsaPss => { + POLICY_ID_USER_PASS_RSA_OAEP + } + _ => { + panic!() + } + } + .into() } fn user_pass_security_policy_uri(_endpoint: &ServerEndpoint) -> UAString { @@ -200,7 +242,11 @@ impl ServerState { UAString::null() } - fn user_identity_tokens(&self, config: &ServerConfig, endpoint: &ServerEndpoint) -> Vec { + fn user_identity_tokens( + &self, + config: &ServerConfig, + endpoint: &ServerEndpoint, + ) -> Vec { let mut user_identity_tokens = Vec::with_capacity(3); // Anonymous policy @@ -236,14 +282,22 @@ impl ServerState { } if user_identity_tokens.is_empty() { - debug!("user_identity_tokens() returned zero endpoints for endpoint {} / {} {}", endpoint.path, endpoint.security_policy, endpoint.security_mode); + debug!( + "user_identity_tokens() returned zero endpoints for endpoint {} / {} {}", + endpoint.path, endpoint.security_policy, endpoint.security_mode + ); } user_identity_tokens } /// Constructs a new endpoint description using the server's info and that in an Endpoint - fn new_endpoint_description(&self, config: &ServerConfig, endpoint: &ServerEndpoint, all_fields: bool) -> EndpointDescription { + fn new_endpoint_description( + &self, + config: &ServerConfig, + endpoint: &ServerEndpoint, + all_fields: bool, + ) -> EndpointDescription { let base_endpoint_url = config.base_endpoint_url(); let user_identity_tokens = self.user_identity_tokens(config, endpoint); @@ -252,25 +306,31 @@ impl ServerState { // and docs say not to bother sending the server and server // certificate info. 
let (server, server_certificate) = if all_fields { - (ApplicationDescription { - application_uri: self.application_uri.clone(), - product_uri: self.product_uri.clone(), - application_name: self.application_name.clone(), - application_type: self.application_type(), - gateway_server_uri: self.gateway_server_uri(), - discovery_profile_uri: UAString::null(), - discovery_urls: self.discovery_urls(), - }, self.server_certificate_as_byte_string()) + ( + ApplicationDescription { + application_uri: self.application_uri.clone(), + product_uri: self.product_uri.clone(), + application_name: self.application_name.clone(), + application_type: self.application_type(), + gateway_server_uri: self.gateway_server_uri(), + discovery_profile_uri: UAString::null(), + discovery_urls: self.discovery_urls(), + }, + self.server_certificate_as_byte_string(), + ) } else { - (ApplicationDescription { - application_uri: UAString::null(), - product_uri: UAString::null(), - application_name: LocalizedText::null(), - application_type: self.application_type(), - gateway_server_uri: self.gateway_server_uri(), - discovery_profile_uri: UAString::null(), - discovery_urls: self.discovery_urls(), - }, ByteString::null()) + ( + ApplicationDescription { + application_uri: UAString::null(), + product_uri: UAString::null(), + application_name: LocalizedText::null(), + application_type: self.application_type(), + gateway_server_uri: self.gateway_server_uri(), + discovery_profile_uri: UAString::null(), + discovery_urls: self.discovery_urls(), + }, + ByteString::null(), + ) }; EndpointDescription { @@ -290,13 +350,23 @@ impl ServerState { if config.discovery_urls.is_empty() { None } else { - Some(config.discovery_urls.iter().map(|url| UAString::from(url)).collect()) + Some( + config + .discovery_urls + .iter() + .map(|url| UAString::from(url)) + .collect(), + ) } } - pub fn application_type(&self) -> ApplicationType { ApplicationType::Server } + pub fn application_type(&self) -> ApplicationType { + ApplicationType::Server + } - pub fn gateway_server_uri(&self) -> UAString { UAString::null() } + pub fn gateway_server_uri(&self) -> UAString { + UAString::null() + } pub fn abort(&mut self) { info!("Server has been told to abort"); @@ -304,15 +374,21 @@ impl ServerState { self.state = ServerStateType::Shutdown; } - pub fn state(&self) -> ServerStateType { self.state } + pub fn state(&self) -> ServerStateType { + self.state + } pub fn set_state(&mut self, state: ServerStateType) { self.state = state; } - pub fn is_abort(&self) -> bool { self.abort } + pub fn is_abort(&self) -> bool { + self.abort + } - pub fn is_running(&self) -> bool { self.state == ServerStateType::Running } + pub fn is_running(&self) -> bool { + self.state == ServerStateType::Running + } pub fn server_certificate_as_byte_string(&self) -> ByteString { if let Some(ref server_certificate) = self.server_certificate { @@ -354,7 +430,15 @@ impl ServerState { /// It is possible that the endpoint does not exist, or that the token is invalid / unsupported /// or that the token cannot be used with the end point. The return codes reflect the responses /// that ActivateSession would expect from a service call. 
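In the authentication body that follows, those failure modes surface as a handful of status codes. A hedged summary in code, assuming only that opcua_types' StatusCode values compare with == (explain_activate_failure is an illustrative helper, not part of the crate):

use opcua_types::status_code::StatusCode;

// Illustrative mapping of the codes raised by the authentication paths below.
fn explain_activate_failure(status: StatusCode) -> &'static str {
    if status == StatusCode::BadIdentityTokenInvalid {
        "token unreadable, wrong policy id, or missing user name"
    } else if status == StatusCode::BadIdentityTokenRejected {
        "the endpoint does not accept this kind of identity token"
    } else if status == StatusCode::BadUserAccessDenied {
        "the user name / password did not match any configured user token"
    } else {
        "some other service fault"
    }
}

fn main() {
    println!("{}", explain_activate_failure(StatusCode::BadUserAccessDenied));
}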
- pub fn authenticate_endpoint(&self, request: &ActivateSessionRequest, endpoint_url: &str, security_policy: SecurityPolicy, security_mode: MessageSecurityMode, user_identity_token: &ExtensionObject, server_nonce: &ByteString) -> Result { + pub fn authenticate_endpoint( + &self, + request: &ActivateSessionRequest, + endpoint_url: &str, + security_policy: SecurityPolicy, + security_mode: MessageSecurityMode, + user_identity_token: &ExtensionObject, + server_nonce: &ByteString, + ) -> Result { // Get security from endpoint url let config = trace_read_lock_unwrap!(self.config); @@ -368,12 +452,22 @@ impl ServerState { IdentityToken::AnonymousIdentityToken(token) => { Self::authenticate_anonymous_token(endpoint, &token) } - IdentityToken::UserNameIdentityToken(token) => { - self.authenticate_username_identity_token(&config, endpoint, &token, &self.server_pkey, server_nonce) - } - IdentityToken::X509IdentityToken(token) => { - self.authenticate_x509_identity_token(&config, endpoint, &token, &request.user_token_signature, &self.server_certificate, server_nonce) - } + IdentityToken::UserNameIdentityToken(token) => self + .authenticate_username_identity_token( + &config, + endpoint, + &token, + &self.server_pkey, + server_nonce, + ), + IdentityToken::X509IdentityToken(token) => self.authenticate_x509_identity_token( + &config, + endpoint, + &token, + &request.user_token_signature, + &self.server_certificate, + server_nonce, + ), IdentityToken::Invalid(o) => { error!("User identity token type {:?} is unsupported", o.node_id); Err(StatusCode::BadIdentityTokenInvalid) @@ -385,7 +479,11 @@ impl ServerState { } } - pub fn set_register_nodes_callbacks(&mut self, register_nodes_callback: Box, unregister_nodes_callback: Box) { + pub fn set_register_nodes_callbacks( + &mut self, + register_nodes_callback: Box, + unregister_nodes_callback: Box, + ) { self.register_nodes_callback = Some(register_nodes_callback); self.unregister_nodes_callback = Some(unregister_nodes_callback); } @@ -397,12 +495,18 @@ impl ServerState { } /// Authenticates an anonymous token, i.e. does the endpoint support anonymous access or not - fn authenticate_anonymous_token(endpoint: &ServerEndpoint, token: &AnonymousIdentityToken) -> Result { + fn authenticate_anonymous_token( + endpoint: &ServerEndpoint, + token: &AnonymousIdentityToken, + ) -> Result { if token.policy_id.as_ref() != POLICY_ID_ANONYMOUS { error!("Token doesn't possess the correct policy id"); Err(StatusCode::BadIdentityTokenInvalid) } else if !endpoint.supports_anonymous() { - error!("Endpoint \"{}\" does not support anonymous authentication", endpoint.path); + error!( + "Endpoint \"{}\" does not support anonymous authentication", + endpoint.path + ); Err(StatusCode::BadIdentityTokenRejected) } else { debug!("Anonymous identity is authenticated"); @@ -412,7 +516,14 @@ impl ServerState { /// Authenticates the username identity token with the supplied endpoint. The function returns the user token identifier /// that matches the identity token. 
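The body below iterates the endpoint's user_token_ids and applies a simple rule: a configured token with no password only matches an empty supplied password, otherwise the two are compared as UTF-8 bytes (after the supplied password has been decrypted with the server key when an encryption algorithm is set). A self-contained sketch of just that comparison rule (password_matches is illustrative, not a crate function):

// Mirrors the comparison in authenticate_username_identity_token, minus the decryption step.
fn password_matches(configured: Option<&str>, supplied: &str) -> bool {
    match configured {
        // Empty password configured for the user: only an empty supplied password matches.
        None => supplied.is_empty(),
        // Otherwise compare as UTF-8 bytes.
        Some(expected) => expected.as_bytes() == supplied.as_bytes(),
    }
}

fn main() {
    assert!(password_matches(None, ""));
    assert!(!password_matches(None, "secret"));
    assert!(password_matches(Some("secret"), "secret"));
    assert!(!password_matches(Some("secret"), "Secret"));
}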
- fn authenticate_username_identity_token(&self, config: &ServerConfig, endpoint: &ServerEndpoint, token: &UserNameIdentityToken, server_key: &Option, server_nonce: &ByteString) -> Result { + fn authenticate_username_identity_token( + &self, + config: &ServerConfig, + endpoint: &ServerEndpoint, + token: &UserNameIdentityToken, + server_key: &Option, + server_nonce: &ByteString, + ) -> Result { if !endpoint.supports_user_pass(&config.user_tokens) { error!("Endpoint doesn't support username password tokens"); Err(StatusCode::BadIdentityTokenRejected) @@ -423,10 +534,18 @@ impl ServerState { error!("User identify token supplies no user name"); Err(StatusCode::BadIdentityTokenInvalid) } else { - debug!("policy id = {}, encryption algorithm = {}", token.policy_id.as_ref(), token.encryption_algorithm.as_ref()); + debug!( + "policy id = {}, encryption algorithm = {}", + token.policy_id.as_ref(), + token.encryption_algorithm.as_ref() + ); let token_password = if !token.encryption_algorithm.is_null() { if let Some(ref server_key) = server_key { - user_identity::decrypt_user_identity_token_password(&token, server_nonce.as_ref(), server_key)? + user_identity::decrypt_user_identity_token_password( + &token, + server_nonce.as_ref(), + server_key, + )? } else { error!("Identity token password is encrypted but no server private key was supplied"); return Err(StatusCode::BadIdentityTokenInvalid); @@ -438,18 +557,24 @@ impl ServerState { // Iterate ids in endpoint for user_token_id in &endpoint.user_token_ids { if let Some(server_user_token) = config.user_tokens.get(user_token_id) { - if server_user_token.is_user_pass() && &server_user_token.user == token.user_name.as_ref() { + if server_user_token.is_user_pass() + && &server_user_token.user == token.user_name.as_ref() + { // test for empty password let valid = if server_user_token.pass.is_none() { // Empty password for user token_password.is_empty() } else { // Password compared as UTF-8 bytes - let server_password = server_user_token.pass.as_ref().unwrap().as_bytes(); + let server_password = + server_user_token.pass.as_ref().unwrap().as_bytes(); server_password == token_password.as_bytes() }; if !valid { - error!("Cannot authenticate \"{}\", password is invalid", server_user_token.user); + error!( + "Cannot authenticate \"{}\", password is invalid", + server_user_token.user + ); return Err(StatusCode::BadUserAccessDenied); } else { return Ok(user_token_id.clone()); @@ -457,14 +582,25 @@ impl ServerState { } } } - error!("Cannot authenticate \"{}\", user not found for endpoint", token.user_name); + error!( + "Cannot authenticate \"{}\", user not found for endpoint", + token.user_name + ); Err(StatusCode::BadUserAccessDenied) } } /// Authenticate the x509 token against the endpoint. The function returns the user token identifier /// that matches the identity token. 
- fn authenticate_x509_identity_token(&self, config: &ServerConfig, endpoint: &ServerEndpoint, token: &X509IdentityToken, user_token_signature: &SignatureData, server_certificate: &Option, server_nonce: &ByteString) -> Result { + fn authenticate_x509_identity_token( + &self, + config: &ServerConfig, + endpoint: &ServerEndpoint, + token: &X509IdentityToken, + user_token_signature: &SignatureData, + server_certificate: &Option, + server_nonce: &ByteString, + ) -> Result { if !endpoint.supports_x509(&config.user_tokens) { error!("Endpoint doesn't support x509 tokens"); Err(StatusCode::BadIdentityTokenRejected) @@ -474,26 +610,32 @@ impl ServerState { } else { let result = match server_certificate { Some(ref server_certificate) => { - // Find the security policy used for verifying tokens let user_identity_tokens = self.user_identity_tokens(config, endpoint); - let security_policy = user_identity_tokens.iter() + let security_policy = user_identity_tokens + .iter() .find(|t| t.token_type == UserTokenType::Certificate) .map(|t| SecurityPolicy::from_uri(t.security_policy_uri.as_ref())) .unwrap_or(endpoint.security_policy()); // The security policy has to be something that can encrypt match security_policy { - SecurityPolicy::Unknown | SecurityPolicy::None => Err(StatusCode::BadIdentityTokenInvalid), + SecurityPolicy::Unknown | SecurityPolicy::None => { + Err(StatusCode::BadIdentityTokenInvalid) + } security_policy => { // Verify token - user_identity::verify_x509_identity_token(token, user_token_signature, security_policy, server_certificate, server_nonce.as_ref()) + user_identity::verify_x509_identity_token( + token, + user_token_signature, + security_policy, + server_certificate, + server_nonce.as_ref(), + ) } } } - None => { - Err(StatusCode::BadIdentityTokenInvalid) - } + None => Err(StatusCode::BadIdentityTokenInvalid), }; result.and_then(|_| { // Check the endpoint to see if this token is supported @@ -514,15 +656,24 @@ impl ServerState { } } - pub fn set_historical_data_provider(&mut self, historical_data_provider: Box) { + pub fn set_historical_data_provider( + &mut self, + historical_data_provider: Box, + ) { self.historical_data_provider = Some(historical_data_provider); } - pub fn set_historical_event_provider(&mut self, historical_event_provider: Box) { + pub fn set_historical_event_provider( + &mut self, + historical_event_provider: Box, + ) { self.historical_event_provider = Some(historical_event_provider); } - pub(crate) fn raise_and_log(&self, event: T) -> Result where T: AuditEvent + Event { + pub(crate) fn raise_and_log(&self, event: T) -> Result + where + T: AuditEvent + Event, + { let audit_log = trace_write_lock_unwrap!(self.audit_log); audit_log.raise_and_log(event) } diff --git a/server/src/subscriptions/mod.rs b/server/src/subscriptions/mod.rs index 17ba0657d..217e356b5 100644 --- a/server/src/subscriptions/mod.rs +++ b/server/src/subscriptions/mod.rs @@ -2,11 +2,8 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use opcua_types::{ - status_code::StatusCode, - service_types::PublishRequest, -}; use opcua_core::supported_message::SupportedMessage; +use opcua_types::{service_types::PublishRequest, status_code::StatusCode}; /// The publish request entry preserves the request_id which is part of the chunk layer but clients /// are fickle about receiving responses from the same as the request. 
Normally this is easy because @@ -37,6 +34,6 @@ fn duration_from_ms(d: f64) -> time::Duration { time::Duration::microseconds((d * 1000f64) as i64) } -pub mod subscriptions; -pub mod subscription; pub mod monitored_item; +pub mod subscription; +pub mod subscriptions; diff --git a/server/src/subscriptions/monitored_item.rs b/server/src/subscriptions/monitored_item.rs index 03014b5fa..caacff3bc 100644 --- a/server/src/subscriptions/monitored_item.rs +++ b/server/src/subscriptions/monitored_item.rs @@ -6,21 +6,17 @@ use std::collections::{BTreeSet, VecDeque}; use std::result::Result; use opcua_types::{ - *, node_ids::ObjectId, service_types::{ - DataChangeFilter, EventFieldList, EventFilter, MonitoredItemCreateRequest, MonitoredItemModifyRequest, - MonitoredItemNotification, ReadValueId, TimestampsToReturn, + DataChangeFilter, EventFieldList, EventFilter, MonitoredItemCreateRequest, + MonitoredItemModifyRequest, MonitoredItemNotification, ReadValueId, TimestampsToReturn, }, status_code::StatusCode, + *, }; use crate::{ - address_space::{ - AddressSpace, - EventNotifier, - node::Node, - }, + address_space::{node::Node, AddressSpace, EventNotifier}, constants, events::event_filter, }; @@ -61,19 +57,29 @@ impl FilterType { match filter_type_id { ObjectId::DataChangeFilter_Encoding_DefaultBinary => { let decoding_limits = DecodingLimits::minimal(); - Ok(FilterType::DataChangeFilter(filter.decode_inner::(&decoding_limits)?)) + Ok(FilterType::DataChangeFilter( + filter.decode_inner::(&decoding_limits)?, + )) } ObjectId::EventFilter_Encoding_DefaultBinary => { let decoding_limits = DecodingLimits::default(); - Ok(FilterType::EventFilter(filter.decode_inner::(&decoding_limits)?)) + Ok(FilterType::EventFilter( + filter.decode_inner::(&decoding_limits)?, + )) } _ => { - error!("Requested data filter type is not supported, {:?}", filter_type_id); + error!( + "Requested data filter type is not supported, {:?}", + filter_type_id + ); Err(StatusCode::BadFilterNotAllowed) } } } else { - error!("Requested data filter type is not an object id, {:?}", filter_type_id); + error!( + "Requested data filter type is not an object id, {:?}", + filter_type_id + ); Err(StatusCode::BadFilterNotAllowed) } } @@ -112,10 +118,17 @@ pub(crate) enum TickResult { } impl MonitoredItem { - pub fn new(now: &DateTimeUtc, monitored_item_id: u32, timestamps_to_return: TimestampsToReturn, request: &MonitoredItemCreateRequest) -> Result { + pub fn new( + now: &DateTimeUtc, + monitored_item_id: u32, + timestamps_to_return: TimestampsToReturn, + request: &MonitoredItemCreateRequest, + ) -> Result { let filter = FilterType::from_filter(&request.requested_parameters.filter)?; - let sampling_interval = Self::sanitize_sampling_interval(request.requested_parameters.sampling_interval); - let queue_size = Self::sanitize_queue_size(request.requested_parameters.queue_size as usize); + let sampling_interval = + Self::sanitize_sampling_interval(request.requested_parameters.sampling_interval); + let queue_size = + Self::sanitize_queue_size(request.requested_parameters.queue_size as usize); Ok(MonitoredItem { monitored_item_id, item_to_monitor: request.item_to_monitor.clone(), @@ -136,11 +149,18 @@ impl MonitoredItem { /// Modifies the existing item with the values of the modify request. On success, the result /// holds the filter result. 
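modify() below re-runs the requested parameters through the sanitize_sampling_interval and sanitize_queue_size helpers further down in this file. A small sketch of that clamping, with placeholder limits standing in for constants::MIN_SAMPLING_INTERVAL and constants::MAX_DATA_CHANGE_QUEUE_SIZE:

// Placeholder limits; the real values come from the server's constants module.
const MIN_SAMPLING_INTERVAL: f64 = 10.0;
const MAX_DATA_CHANGE_QUEUE_SIZE: usize = 10;

fn sanitize_sampling_interval(requested: f64) -> f64 {
    if requested < 0.0 {
        // Spec: any negative number is interpreted as -1, i.e. use the
        // subscription's publishing interval.
        -1.0
    } else if requested == 0.0 || requested < MIN_SAMPLING_INTERVAL {
        MIN_SAMPLING_INTERVAL
    } else {
        requested
    }
}

fn sanitize_queue_size(requested: usize) -> usize {
    if requested == 0 {
        // A requested size of 0 is promoted to a queue of one entry.
        1
    } else if requested > MAX_DATA_CHANGE_QUEUE_SIZE {
        MAX_DATA_CHANGE_QUEUE_SIZE
    } else {
        requested
    }
}

fn main() {
    assert_eq!(sanitize_sampling_interval(-5.0), -1.0);
    assert_eq!(sanitize_sampling_interval(0.0), MIN_SAMPLING_INTERVAL);
    assert_eq!(sanitize_sampling_interval(250.0), 250.0);
    assert_eq!(sanitize_queue_size(0), 1);
    assert_eq!(sanitize_queue_size(10_000), MAX_DATA_CHANGE_QUEUE_SIZE);
}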
- pub fn modify(&mut self, address_space: &AddressSpace, timestamps_to_return: TimestampsToReturn, request: &MonitoredItemModifyRequest) -> Result { + pub fn modify( + &mut self, + address_space: &AddressSpace, + timestamps_to_return: TimestampsToReturn, + request: &MonitoredItemModifyRequest, + ) -> Result { self.timestamps_to_return = timestamps_to_return; self.filter = FilterType::from_filter(&request.requested_parameters.filter)?; - self.sampling_interval = Self::sanitize_sampling_interval(request.requested_parameters.sampling_interval); - self.queue_size = Self::sanitize_queue_size(request.requested_parameters.queue_size as usize); + self.sampling_interval = + Self::sanitize_sampling_interval(request.requested_parameters.sampling_interval); + self.queue_size = + Self::sanitize_queue_size(request.requested_parameters.queue_size as usize); self.client_handle = request.requested_parameters.client_handle; self.discard_oldest = request.requested_parameters.discard_oldest; @@ -164,17 +184,27 @@ impl MonitoredItem { /// Adds or removes other monitored items which will be triggered when this monitored item changes pub fn set_triggering(&mut self, items_to_add: &[u32], items_to_remove: &[u32]) { // Spec says to process remove items before adding new ones. - items_to_remove.iter().for_each(|i| { self.triggered_items.remove(i); }); - items_to_add.iter().for_each(|i| { self.triggered_items.insert(*i); }); + items_to_remove.iter().for_each(|i| { + self.triggered_items.remove(i); + }); + items_to_add.iter().for_each(|i| { + self.triggered_items.insert(*i); + }); } /// Validates the filter associated with the monitored item and returns the filter result /// encoded in an extension object. - pub fn validate_filter(&self, address_space: &AddressSpace) -> Result { + pub fn validate_filter( + &self, + address_space: &AddressSpace, + ) -> Result { // Event filter must be validated let filter_result = if let FilterType::EventFilter(ref event_filter) = self.filter { let filter_result = event_filter::validate(event_filter, address_space)?; - ExtensionObject::from_encodable(ObjectId::EventFilterResult_Encoding_DefaultBinary, &filter_result) + ExtensionObject::from_encodable( + ObjectId::EventFilterResult_Encoding_DefaultBinary, + &filter_result, + ) } else { // DataChangeFilter has no result ExtensionObject::null() @@ -190,7 +220,13 @@ impl MonitoredItem { /// /// Function returns a `TickResult` denoting if the value changed or not, and whether it should /// be reported. 
- pub fn tick(&mut self, now: &DateTimeUtc, address_space: &AddressSpace, publishing_interval_elapsed: bool, resend_data: bool) -> TickResult { + pub fn tick( + &mut self, + now: &DateTimeUtc, + address_space: &AddressSpace, + publishing_interval_elapsed: bool, + resend_data: bool, + ) -> TickResult { if self.monitoring_mode == MonitoringMode::Disabled { TickResult::NoChange } else { @@ -234,7 +270,12 @@ impl MonitoredItem { /// Gets the event notifier bits for a node, or empty if there are no bits fn get_event_notifier(node: &dyn Node) -> EventNotifier { - if let Some(v) = node.get_attribute(TimestampsToReturn::Neither, AttributeId::EventNotifier, NumericRange::None, &QualifiedName::null()) { + if let Some(v) = node.get_attribute( + TimestampsToReturn::Neither, + AttributeId::EventNotifier, + NumericRange::None, + &QualifiedName::null(), + ) { if let Variant::Byte(v) = v.value.unwrap_or(0u8.into()) { EventNotifier::from_bits_truncate(v) } else { @@ -246,14 +287,27 @@ impl MonitoredItem { } /// Check for - fn check_for_events(&mut self, address_space: &AddressSpace, happened_since: &DateTimeUtc, node: &dyn Node) -> bool { + fn check_for_events( + &mut self, + address_space: &AddressSpace, + happened_since: &DateTimeUtc, + node: &dyn Node, + ) -> bool { match self.filter { FilterType::EventFilter(ref filter) => { // Node has to allow subscribe to events if Self::get_event_notifier(node).contains(EventNotifier::SUBSCRIBE_TO_EVENTS) { let object_id = node.node_id(); - if let Some(events) = event_filter::evaluate(&object_id, filter, address_space, &happened_since, self.client_handle) { - events.into_iter().for_each(|event| self.enqueue_notification_message(event)); + if let Some(events) = event_filter::evaluate( + &object_id, + filter, + address_space, + &happened_since, + self.client_handle, + ) { + events + .into_iter() + .for_each(|event| self.enqueue_notification_message(event)); true } else { false @@ -262,12 +316,23 @@ impl MonitoredItem { false } } - _ => panic!() + _ => panic!(), } } - fn check_for_data_change(&mut self, _address_space: &AddressSpace, resend_data: bool, attribute_id: AttributeId, node: &dyn Node) -> bool { - let data_value = node.get_attribute(TimestampsToReturn::Neither, attribute_id, NumericRange::None, &QualifiedName::null()); + fn check_for_data_change( + &mut self, + _address_space: &AddressSpace, + resend_data: bool, + attribute_id: AttributeId, + node: &dyn Node, + ) -> bool { + let data_value = node.get_attribute( + TimestampsToReturn::Neither, + attribute_id, + NumericRange::None, + &QualifiedName::null(), + ); if let Some(mut data_value) = data_value { // Test for data change let data_change = if resend_data { @@ -277,9 +342,7 @@ impl MonitoredItem { // if the value is considered to have changed, otherwise it is a straight // equality test. 
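That comment distinguishes the straight equality test used for FilterType::None from letting the DataChangeFilter decide whether a change is significant. A simplified, self-contained model of the absolute-deadband case (the real DataChangeFilter also carries a trigger and supports percent deadbands; value_changed is illustrative only):

// None behaves like FilterType::None (plain inequality); Some(deadband) behaves
// like an absolute-deadband data change filter.
fn value_changed(last: Option<f64>, current: f64, deadband: Option<f64>) -> bool {
    match last {
        // No previous value recorded, so the item is considered changed.
        None => true,
        Some(last) => match deadband {
            None => current != last,
            Some(deadband) => (current - last).abs() > deadband,
        },
    }
}

fn main() {
    assert!(value_changed(None, 1.0, None));
    assert!(!value_changed(Some(1.0), 1.0, None));
    assert!(value_changed(Some(1.0), 1.1, None));
    assert!(!value_changed(Some(1.0), 1.4, Some(0.5))); // within the deadband
    assert!(value_changed(Some(1.0), 1.6, Some(0.5)));
}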
match self.filter { - FilterType::None => { - data_value.value != last_data_value.value - } + FilterType::None => data_value.value != last_data_value.value, FilterType::DataChangeFilter(ref filter) => { !filter.compare(&data_value, last_data_value, None) } @@ -290,11 +353,18 @@ impl MonitoredItem { } } else { // There is no previous data value so yes consider it changed - trace!("No last data value so item has changed, node {:?}", self.item_to_monitor.node_id); + trace!( + "No last data value so item has changed, node {:?}", + self.item_to_monitor.node_id + ); true }; if data_change { - trace!("Data change on item -, node {:?}, data_value = {:?}", self.item_to_monitor.node_id, data_value); + trace!( + "Data change on item -, node {:?}, data_value = {:?}", + self.item_to_monitor.node_id, + data_value + ); // Store current data value to compare against on the next tick self.last_data_value = Some(data_value.clone()); @@ -329,7 +399,10 @@ impl MonitoredItem { trace!("Monitored item state = {:?}", self); } else { - trace!("No data change on item, node {:?}", self.item_to_monitor.node_id); + trace!( + "No data change on item, node {:?}", + self.item_to_monitor.node_id + ); } data_change } else { @@ -340,7 +413,7 @@ impl MonitoredItem { fn is_event_filter(&self) -> bool { match self.filter { FilterType::EventFilter(_) => true, - _ => false + _ => false, } } @@ -349,7 +422,12 @@ impl MonitoredItem { /// check, the latest value and its timestamps will be stored in the monitored item. /// /// The function will return true if the value was changed, false otherwise. - pub fn check_value(&mut self, address_space: &AddressSpace, now: &DateTimeUtc, resend_data: bool) -> bool { + pub fn check_value( + &mut self, + address_space: &AddressSpace, + now: &DateTimeUtc, + resend_data: bool, + ) -> bool { if self.monitoring_mode == MonitoringMode::Disabled { panic!("Should not check value while monitoring mode is disabled"); } @@ -367,18 +445,28 @@ impl MonitoredItem { false } } - _ => { - self.check_for_data_change(address_space, resend_data, attribute_id, node) - } + _ => self.check_for_data_change( + address_space, + resend_data, + attribute_id, + node, + ), } } Err(_) => { - trace!("Item has no attribute_id {} so it hasn't changed, node {:?}", self.item_to_monitor.attribute_id, self.item_to_monitor.node_id); + trace!( + "Item has no attribute_id {} so it hasn't changed, node {:?}", + self.item_to_monitor.attribute_id, + self.item_to_monitor.node_id + ); false } } } else { - trace!("Cannot find item to monitor, node {:?}", self.item_to_monitor.node_id); + trace!( + "Cannot find item to monitor, node {:?}", + self.item_to_monitor.node_id + ); false }; self.last_sample_time = *now; @@ -386,10 +474,16 @@ impl MonitoredItem { } /// Enqueues a notification message for the monitored item - pub fn enqueue_notification_message(&mut self, notification: T) where T: Into { + pub fn enqueue_notification_message(&mut self, notification: T) + where + T: Into, + { // test for overflow let overflow = if self.notification_queue.len() == self.queue_size { - trace!("Data change overflow, node {:?}", self.item_to_monitor.node_id); + trace!( + "Data change overflow, node {:?}", + self.item_to_monitor.node_id + ); // Overflow behaviour if self.discard_oldest { // Throw away oldest item (the one at the start) to make space at the end @@ -407,7 +501,8 @@ impl MonitoredItem { if overflow { if let Notification::MonitoredItemNotification(ref mut notification) = notification { // Set the overflow bit on the data value's status - 
notification.value.status = Some(notification.value.status() | StatusCode::OVERFLOW); + notification.value.status = + Some(notification.value.status() | StatusCode::OVERFLOW); } self.queue_overflow = true; } @@ -443,7 +538,9 @@ impl MonitoredItem { // From spec "any negative number is interpreted as -1" // -1 means monitored item's sampling interval defaults to the subscription's publishing interval -1.0 - } else if requested_sampling_interval == 0.0 || requested_sampling_interval < constants::MIN_SAMPLING_INTERVAL { + } else if requested_sampling_interval == 0.0 + || requested_sampling_interval < constants::MIN_SAMPLING_INTERVAL + { constants::MIN_SAMPLING_INTERVAL } else { requested_sampling_interval @@ -455,14 +552,14 @@ impl MonitoredItem { if requested_queue_size == 0 { // For data monitored items 0 -> 1 1 - // Future - for event monitored items, queue size should be the default queue size for event notifications + // Future - for event monitored items, queue size should be the default queue size for event notifications } else if requested_queue_size == 1 { 1 - // Future - for event monitored items, the minimum queue size the server requires for event notifications + // Future - for event monitored items, the minimum queue size the server requires for event notifications } else if requested_queue_size > constants::MAX_DATA_CHANGE_QUEUE_SIZE { constants::MAX_DATA_CHANGE_QUEUE_SIZE - // Future - for event monitored items MaxUInt32 returns the maximum queue size the server support - // for event notifications + // Future - for event monitored items MaxUInt32 returns the maximum queue size the server support + // for event notifications } else { requested_queue_size } @@ -510,4 +607,4 @@ impl MonitoredItem { pub(crate) fn set_discard_oldest(&mut self, discard_oldest: bool) { self.discard_oldest = discard_oldest; } -} \ No newline at end of file +} diff --git a/server/src/subscriptions/subscription.rs b/server/src/subscriptions/subscription.rs index 4176f2256..7d52825b2 100644 --- a/server/src/subscriptions/subscription.rs +++ b/server/src/subscriptions/subscription.rs @@ -2,26 +2,27 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use std::collections::{HashMap, BTreeSet, VecDeque}; +use std::collections::{BTreeSet, HashMap, VecDeque}; use std::sync::{Arc, RwLock}; use chrono; use opcua_types::{ - *, - status_code::StatusCode, service_types::{ - TimestampsToReturn, NotificationMessage, MonitoredItemCreateRequest, MonitoredItemCreateResult, MonitoredItemModifyRequest, MonitoredItemModifyResult, + MonitoredItemCreateRequest, MonitoredItemCreateResult, MonitoredItemModifyRequest, + MonitoredItemModifyResult, NotificationMessage, TimestampsToReturn, }, + status_code::StatusCode, + *, }; use opcua_core::handle::Handle; use crate::{ - constants, - subscriptions::monitored_item::{MonitoredItem, TickResult, Notification}, address_space::AddressSpace, + constants, diagnostics::ServerDiagnostics, + subscriptions::monitored_item::{MonitoredItem, Notification, TickResult}, }; /// The state of the subscription @@ -88,7 +89,10 @@ pub(crate) struct UpdateStateResult { } impl UpdateStateResult { - pub fn new(handled_state: HandledState, update_state_action: UpdateStateAction) -> UpdateStateResult { + pub fn new( + handled_state: HandledState, + update_state_action: UpdateStateAction, + ) -> UpdateStateResult { UpdateStateResult { handled_state, update_state_action, @@ -167,7 +171,15 @@ impl Drop for Subscription { } impl Subscription { - pub fn new(diagnostics: Arc>, 
subscription_id: u32, publishing_enabled: bool, publishing_interval: Duration, lifetime_counter: u32, keep_alive_counter: u32, priority: u8) -> Subscription { + pub fn new( + diagnostics: Arc>, + subscription_id: u32, + publishing_enabled: bool, + publishing_interval: Duration, + lifetime_counter: u32, + keep_alive_counter: u32, + priority: u8, + ) -> Subscription { let subscription = Subscription { subscription_id, publishing_interval, @@ -218,88 +230,128 @@ impl Subscription { } /// Creates monitored items on the specified subscription, returning the creation results - pub fn create_monitored_items(&mut self, address_space: &AddressSpace, now: &DateTimeUtc, timestamps_to_return: TimestampsToReturn, items_to_create: &[MonitoredItemCreateRequest], max_monitored_items_per_sub: usize) -> Vec { + pub fn create_monitored_items( + &mut self, + address_space: &AddressSpace, + now: &DateTimeUtc, + timestamps_to_return: TimestampsToReturn, + items_to_create: &[MonitoredItemCreateRequest], + max_monitored_items_per_sub: usize, + ) -> Vec { self.reset_lifetime_counter(); // Add items to the subscription if they're not already in its - items_to_create.iter().map(|item_to_create| { - if !address_space.node_exists(&item_to_create.item_to_monitor.node_id) { - Self::monitored_item_create_error(StatusCode::BadNodeIdUnknown) - } else { - - // TODO validate the attribute id for the type of node - // TODO validate the index range for the node - - // Create a monitored item, if possible - let monitored_item_id = self.next_monitored_item_id; - match MonitoredItem::new(now, monitored_item_id, timestamps_to_return, item_to_create) { - Ok(monitored_item) => { - if max_monitored_items_per_sub == 0 || self.monitored_items.len() <= max_monitored_items_per_sub { - let revised_sampling_interval = monitored_item.sampling_interval(); - let revised_queue_size = monitored_item.queue_size() as u32; - // Validate the filter before registering the item - match monitored_item.validate_filter(address_space) { - Ok(filter_result) => { - // Register the item with the subscription - self.monitored_items.insert(monitored_item_id, monitored_item); - self.next_monitored_item_id += 1; - MonitoredItemCreateResult { - status_code: StatusCode::Good, - monitored_item_id, - revised_sampling_interval, - revised_queue_size, - filter_result, + items_to_create + .iter() + .map(|item_to_create| { + if !address_space.node_exists(&item_to_create.item_to_monitor.node_id) { + Self::monitored_item_create_error(StatusCode::BadNodeIdUnknown) + } else { + // TODO validate the attribute id for the type of node + // TODO validate the index range for the node + + // Create a monitored item, if possible + let monitored_item_id = self.next_monitored_item_id; + match MonitoredItem::new( + now, + monitored_item_id, + timestamps_to_return, + item_to_create, + ) { + Ok(monitored_item) => { + if max_monitored_items_per_sub == 0 + || self.monitored_items.len() <= max_monitored_items_per_sub + { + let revised_sampling_interval = monitored_item.sampling_interval(); + let revised_queue_size = monitored_item.queue_size() as u32; + // Validate the filter before registering the item + match monitored_item.validate_filter(address_space) { + Ok(filter_result) => { + // Register the item with the subscription + self.monitored_items + .insert(monitored_item_id, monitored_item); + self.next_monitored_item_id += 1; + MonitoredItemCreateResult { + status_code: StatusCode::Good, + monitored_item_id, + revised_sampling_interval, + revised_queue_size, + filter_result, + } + } + 
Err(status_code) => { + Self::monitored_item_create_error(status_code) } } - Err(status_code) => Self::monitored_item_create_error(status_code) + } else { + // Number of monitored items exceeds limit per sub + Self::monitored_item_create_error( + StatusCode::BadTooManyMonitoredItems, + ) } - } else { - // Number of monitored items exceeds limit per sub - Self::monitored_item_create_error(StatusCode::BadTooManyMonitoredItems) } + Err(status_code) => Self::monitored_item_create_error(status_code), } - Err(status_code) => Self::monitored_item_create_error(status_code) } - } - }).collect() + }) + .collect() } /// Modify the specified monitored items, returning a result for each - pub fn modify_monitored_items(&mut self, address_space: &AddressSpace, timestamps_to_return: TimestampsToReturn, items_to_modify: &[MonitoredItemModifyRequest]) -> Vec { + pub fn modify_monitored_items( + &mut self, + address_space: &AddressSpace, + timestamps_to_return: TimestampsToReturn, + items_to_modify: &[MonitoredItemModifyRequest], + ) -> Vec { self.reset_lifetime_counter(); - items_to_modify.iter().map(|item_to_modify| { - match self.monitored_items.get_mut(&item_to_modify.monitored_item_id) { - Some(monitored_item) => { - // Try to change the monitored item according to the modify request - let modify_result = monitored_item.modify(address_space, timestamps_to_return, item_to_modify); - match modify_result { - Ok(filter_result) => MonitoredItemModifyResult { - status_code: StatusCode::Good, - revised_sampling_interval: monitored_item.sampling_interval(), - revised_queue_size: monitored_item.queue_size() as u32, - filter_result, - }, - Err(err) => MonitoredItemModifyResult { - status_code: err, - revised_sampling_interval: 0f64, - revised_queue_size: 0, - filter_result: ExtensionObject::null(), + items_to_modify + .iter() + .map(|item_to_modify| { + match self + .monitored_items + .get_mut(&item_to_modify.monitored_item_id) + { + Some(monitored_item) => { + // Try to change the monitored item according to the modify request + let modify_result = monitored_item.modify( + address_space, + timestamps_to_return, + item_to_modify, + ); + match modify_result { + Ok(filter_result) => MonitoredItemModifyResult { + status_code: StatusCode::Good, + revised_sampling_interval: monitored_item.sampling_interval(), + revised_queue_size: monitored_item.queue_size() as u32, + filter_result, + }, + Err(err) => MonitoredItemModifyResult { + status_code: err, + revised_sampling_interval: 0f64, + revised_queue_size: 0, + filter_result: ExtensionObject::null(), + }, } } + // Item does not exist + None => MonitoredItemModifyResult { + status_code: StatusCode::BadMonitoredItemIdInvalid, + revised_sampling_interval: 0f64, + revised_queue_size: 0, + filter_result: ExtensionObject::null(), + }, } - // Item does not exist - None => MonitoredItemModifyResult { - status_code: StatusCode::BadMonitoredItemIdInvalid, - revised_sampling_interval: 0f64, - revised_queue_size: 0, - filter_result: ExtensionObject::null(), - } - } - }).collect() + }) + .collect() } /// Sets the monitoring mode on one monitored item - pub fn set_monitoring_mode(&mut self, monitored_item_id: u32, monitoring_mode: MonitoringMode) -> StatusCode { + pub fn set_monitoring_mode( + &mut self, + monitored_item_id: u32, + monitoring_mode: MonitoringMode, + ) -> StatusCode { if let Some(monitored_item) = self.monitored_items.get_mut(&monitored_item_id) { monitored_item.set_monitoring_mode(monitoring_mode); StatusCode::Good @@ -311,19 +363,30 @@ impl Subscription { /// Delete 
the specified monitored items (by item id), returning a status code for each pub fn delete_monitored_items(&mut self, items_to_delete: &[u32]) -> Vec { self.reset_lifetime_counter(); - items_to_delete.iter().map(|item_to_delete| { - match self.monitored_items.remove(item_to_delete) { - Some(_) => StatusCode::Good, - None => StatusCode::BadMonitoredItemIdInvalid - } - }).collect() + items_to_delete + .iter() + .map( + |item_to_delete| match self.monitored_items.remove(item_to_delete) { + Some(_) => StatusCode::Good, + None => StatusCode::BadMonitoredItemIdInvalid, + }, + ) + .collect() } // Returns two vecs representing the server and client handles for each monitored item. // Called from the GetMonitoredItems impl pub fn get_handles(&self) -> (Vec, Vec) { - let server_handles = self.monitored_items.values().map(|i| i.monitored_item_id()).collect(); - let client_handles = self.monitored_items.values().map(|i| i.client_handle()).collect(); + let server_handles = self + .monitored_items + .values() + .map(|i| i.monitored_item_id()) + .collect(); + let client_handles = self + .monitored_items + .values() + .map(|i| i.client_handle()) + .collect(); (server_handles, client_handles) } @@ -350,18 +413,24 @@ impl Subscription { /// Checks the subscription and monitored items for state change, messages. Returns `true` /// if there are zero or more notifications waiting to be processed. - pub(crate) fn tick(&mut self, now: &DateTimeUtc, address_space: &AddressSpace, tick_reason: TickReason, publishing_req_queued: bool) { + pub(crate) fn tick( + &mut self, + now: &DateTimeUtc, + address_space: &AddressSpace, + tick_reason: TickReason, + publishing_req_queued: bool, + ) { // Check if the publishing interval has elapsed. Only checks on the tick timer. let publishing_interval_elapsed = match tick_reason { - TickReason::ReceivePublishRequest => { - false - } - TickReason::TickTimerFired => if self.state == SubscriptionState::Creating { - true - } else if self.publishing_interval <= 0f64 { - panic!("Publishing interval should have been revised to min interval") - } else { - self.test_and_set_publishing_interval_elapsed(now) + TickReason::ReceivePublishRequest => false, + TickReason::TickTimerFired => { + if self.state == SubscriptionState::Creating { + true + } else if self.publishing_interval <= 0f64 { + panic!("Publishing interval should have been revised to min interval") + } else { + self.test_and_set_publishing_interval_elapsed(now) + } } }; @@ -373,7 +442,12 @@ impl Subscription { SubscriptionState::Closed | SubscriptionState::Creating => None, _ => { let resend_data = self.resend_data; - self.tick_monitored_items(now, address_space, publishing_interval_elapsed, resend_data) + self.tick_monitored_items( + now, + address_space, + publishing_interval_elapsed, + resend_data, + ) } }; self.resend_data = false; @@ -385,13 +459,19 @@ impl Subscription { // to send or state to update if notifications_available || publishing_interval_elapsed || publishing_req_queued { // Update the internal state of the subscription based on what happened - let update_state_result = self.update_state(tick_reason, SubscriptionStateParams { - publishing_req_queued, - notifications_available, - more_notifications, - publishing_timer_expired: publishing_interval_elapsed, - }); - trace!("subscription tick - update_state_result = {:?}", update_state_result); + let update_state_result = self.update_state( + tick_reason, + SubscriptionStateParams { + publishing_req_queued, + notifications_available, + more_notifications, + 
publishing_timer_expired: publishing_interval_elapsed, + }, + ); + trace!( + "subscription tick - update_state_result = {:?}", + update_state_result + ); self.handle_state_result(now, update_state_result, notification); } } @@ -399,16 +479,28 @@ impl Subscription { fn enqueue_notification(&mut self, notification: NotificationMessage) { use std::u32; // For sanity, check the sequence number is the expected sequence number. - let expected_sequence_number = if self.last_sequence_number == u32::MAX { 1 } else { self.last_sequence_number + 1 }; + let expected_sequence_number = if self.last_sequence_number == u32::MAX { + 1 + } else { + self.last_sequence_number + 1 + }; if notification.sequence_number != expected_sequence_number { - panic!("Notification's sequence number is not sequential, expecting {}, got {}", expected_sequence_number, notification.sequence_number); + panic!( + "Notification's sequence number is not sequential, expecting {}, got {}", + expected_sequence_number, notification.sequence_number + ); } // debug!("Enqueuing notification {:?}", notification); self.last_sequence_number = notification.sequence_number; self.notifications.push_back(notification); } - fn handle_state_result(&mut self, now: &DateTimeUtc, update_state_result: UpdateStateResult, notification: Option) { + fn handle_state_result( + &mut self, + now: &DateTimeUtc, + update_state_result: UpdateStateResult, + notification: Option, + ) { // Now act on the state's action match update_state_result.update_state_action { UpdateStateAction::None => { @@ -429,7 +521,10 @@ impl Subscription { } // Send a keep alive debug!("Sending keep alive response"); - let notification = NotificationMessage::keep_alive(self.sequence_number.next(), DateTime::from(now.clone())); + let notification = NotificationMessage::keep_alive( + self.sequence_number.next(), + DateTime::from(now.clone()), + ); self.enqueue_notification(notification); } UpdateStateAction::ReturnNotifications => { @@ -443,8 +538,8 @@ impl Subscription { panic!("SubscriptionCreated got a notification"); } // Subscription was created successfully -// let notification = NotificationMessage::status_change(self.sequence_number.next(), DateTime::from(now.clone()), StatusCode::Good); -// self.enqueue_notification(notification); + // let notification = NotificationMessage::status_change(self.sequence_number.next(), DateTime::from(now.clone()), StatusCode::Good); + // self.enqueue_notification(notification); } UpdateStateAction::SubscriptionExpired => { if notification.is_some() { @@ -453,7 +548,11 @@ impl Subscription { // Delete the monitored items, issue a status change for the subscription debug!("Subscription status change to closed / timeout"); self.monitored_items.clear(); - let notification = NotificationMessage::status_change(self.sequence_number.next(), DateTime::from(now.clone()), StatusCode::BadTimeout); + let notification = NotificationMessage::status_change( + self.sequence_number.next(), + DateTime::from(now.clone()), + StatusCode::BadTimeout, + ); self.enqueue_notification(notification); } } @@ -485,7 +584,11 @@ impl Subscription { // * Update state action - none, return notifications, return keep alive // * Publishing request action - nothing, dequeue // - pub(crate) fn update_state(&mut self, tick_reason: TickReason, p: SubscriptionStateParams) -> UpdateStateResult { + pub(crate) fn update_state( + &mut self, + tick_reason: TickReason, + p: SubscriptionStateParams, + ) -> UpdateStateResult { // This function is called when a publish request is received OR the 
timer expired, so getting // both is invalid code somewhere if tick_reason == TickReason::ReceivePublishRequest && p.publishing_timer_expired { @@ -496,17 +599,22 @@ impl Subscription { { use log::Level::Trace; if log_enabled!(Trace) { - trace!(r#"State inputs: + trace!( + r#"State inputs: subscription_id: {} / state: {:?} tick_reason: {:?} / state_params: {:?} publishing_enabled: {} keep_alive_counter / lifetime_counter: {} / {} message_sent: {}"#, - self.subscription_id, self.state, tick_reason, p, - self.publishing_enabled, - self.keep_alive_counter, - self.lifetime_counter, - self.first_message_sent); + self.subscription_id, + self.state, + tick_reason, + p, + self.publishing_enabled, + self.keep_alive_counter, + self.lifetime_counter, + self.first_message_sent + ); } } @@ -518,14 +626,16 @@ impl Subscription { // uses what its given. Likewise, this function does not "send" notifications, rather // it returns them (if any) and it is up to the caller to send them - // more state tests that match on more than one state match self.state { SubscriptionState::Normal | SubscriptionState::Late | SubscriptionState::KeepAlive => { if self.lifetime_counter == 1 { // State #27 self.state = SubscriptionState::Closed; - return UpdateStateResult::new(HandledState::Closed27, UpdateStateAction::SubscriptionExpired); + return UpdateStateResult::new( + HandledState::Closed27, + UpdateStateAction::SubscriptionExpired, + ); } } _ => { @@ -541,55 +651,110 @@ impl Subscription { // State #3 self.state = SubscriptionState::Normal; self.first_message_sent = false; - return UpdateStateResult::new(HandledState::Create3, UpdateStateAction::SubscriptionCreated); + return UpdateStateResult::new( + HandledState::Create3, + UpdateStateAction::SubscriptionCreated, + ); } SubscriptionState::Normal => { - if tick_reason == TickReason::ReceivePublishRequest && (!self.publishing_enabled || (self.publishing_enabled && !p.more_notifications)) { + if tick_reason == TickReason::ReceivePublishRequest + && (!self.publishing_enabled + || (self.publishing_enabled && !p.more_notifications)) + { // State #4 return UpdateStateResult::new(HandledState::Normal4, UpdateStateAction::None); - } else if tick_reason == TickReason::ReceivePublishRequest && self.publishing_enabled && p.more_notifications { + } else if tick_reason == TickReason::ReceivePublishRequest + && self.publishing_enabled + && p.more_notifications + { // State #5 self.reset_lifetime_counter(); self.first_message_sent = true; - return UpdateStateResult::new(HandledState::Normal5, UpdateStateAction::ReturnNotifications); - } else if p.publishing_timer_expired && p.publishing_req_queued && self.publishing_enabled && p.notifications_available { + return UpdateStateResult::new( + HandledState::Normal5, + UpdateStateAction::ReturnNotifications, + ); + } else if p.publishing_timer_expired + && p.publishing_req_queued + && self.publishing_enabled + && p.notifications_available + { // State #6 self.reset_lifetime_counter(); self.start_publishing_timer(); self.first_message_sent = true; - return UpdateStateResult::new(HandledState::IntervalElapsed6, UpdateStateAction::ReturnNotifications); - } else if p.publishing_timer_expired && p.publishing_req_queued && !self.first_message_sent && (!self.publishing_enabled || (self.publishing_enabled && !p.notifications_available)) { + return UpdateStateResult::new( + HandledState::IntervalElapsed6, + UpdateStateAction::ReturnNotifications, + ); + } else if p.publishing_timer_expired + && p.publishing_req_queued + && 
!self.first_message_sent + && (!self.publishing_enabled + || (self.publishing_enabled && !p.notifications_available)) + { // State #7 self.reset_lifetime_counter(); self.start_publishing_timer(); self.first_message_sent = true; - return UpdateStateResult::new(HandledState::IntervalElapsed7, UpdateStateAction::ReturnKeepAlive); - } else if p.publishing_timer_expired && !p.publishing_req_queued && (!self.first_message_sent || (self.publishing_enabled && p.notifications_available)) { + return UpdateStateResult::new( + HandledState::IntervalElapsed7, + UpdateStateAction::ReturnKeepAlive, + ); + } else if p.publishing_timer_expired + && !p.publishing_req_queued + && (!self.first_message_sent + || (self.publishing_enabled && p.notifications_available)) + { // State #8 self.start_publishing_timer(); self.state = SubscriptionState::Late; - return UpdateStateResult::new(HandledState::IntervalElapsed8, UpdateStateAction::None); - } else if p.publishing_timer_expired && self.first_message_sent && (!self.publishing_enabled || (self.publishing_enabled && !p.notifications_available)) { + return UpdateStateResult::new( + HandledState::IntervalElapsed8, + UpdateStateAction::None, + ); + } else if p.publishing_timer_expired + && self.first_message_sent + && (!self.publishing_enabled + || (self.publishing_enabled && !p.notifications_available)) + { // State #9 self.start_publishing_timer(); self.reset_keep_alive_counter(); self.state = SubscriptionState::KeepAlive; - return UpdateStateResult::new(HandledState::IntervalElapsed9, UpdateStateAction::None); + return UpdateStateResult::new( + HandledState::IntervalElapsed9, + UpdateStateAction::None, + ); } } SubscriptionState::Late => { - if tick_reason == TickReason::ReceivePublishRequest && self.publishing_enabled && (p.notifications_available || p.more_notifications) { + if tick_reason == TickReason::ReceivePublishRequest + && self.publishing_enabled + && (p.notifications_available || p.more_notifications) + { // State #10 self.reset_lifetime_counter(); self.state = SubscriptionState::Normal; self.first_message_sent = true; - return UpdateStateResult::new(HandledState::Late10, UpdateStateAction::ReturnNotifications); - } else if tick_reason == TickReason::ReceivePublishRequest && (!self.publishing_enabled || (self.publishing_enabled && !p.notifications_available && !p.more_notifications)) { + return UpdateStateResult::new( + HandledState::Late10, + UpdateStateAction::ReturnNotifications, + ); + } else if tick_reason == TickReason::ReceivePublishRequest + && (!self.publishing_enabled + || (self.publishing_enabled + && !p.notifications_available + && !p.more_notifications)) + { // State #11 self.reset_lifetime_counter(); self.state = SubscriptionState::KeepAlive; self.first_message_sent = true; - return UpdateStateResult::new(HandledState::Late11, UpdateStateAction::ReturnKeepAlive); + return UpdateStateResult::new( + HandledState::Late11, + UpdateStateAction::ReturnKeepAlive, + ); } else if p.publishing_timer_expired { // State #12 self.start_publishing_timer(); @@ -599,27 +764,61 @@ impl Subscription { SubscriptionState::KeepAlive => { if tick_reason == TickReason::ReceivePublishRequest { // State #13 - return UpdateStateResult::new(HandledState::KeepAlive13, UpdateStateAction::None); - } else if p.publishing_timer_expired && self.publishing_enabled && p.notifications_available && p.publishing_req_queued { + return UpdateStateResult::new( + HandledState::KeepAlive13, + UpdateStateAction::None, + ); + } else if p.publishing_timer_expired + && 
self.publishing_enabled + && p.notifications_available + && p.publishing_req_queued + { // State #14 self.first_message_sent = true; self.state = SubscriptionState::Normal; - return UpdateStateResult::new(HandledState::KeepAlive14, UpdateStateAction::ReturnNotifications); - } else if p.publishing_timer_expired && p.publishing_req_queued && self.keep_alive_counter == 1 && (!self.publishing_enabled || (self.publishing_enabled && p.notifications_available)) { + return UpdateStateResult::new( + HandledState::KeepAlive14, + UpdateStateAction::ReturnNotifications, + ); + } else if p.publishing_timer_expired + && p.publishing_req_queued + && self.keep_alive_counter == 1 + && (!self.publishing_enabled + || (self.publishing_enabled && p.notifications_available)) + { // State #15 self.start_publishing_timer(); self.reset_keep_alive_counter(); - return UpdateStateResult::new(HandledState::KeepAlive15, UpdateStateAction::ReturnKeepAlive); - } else if p.publishing_timer_expired && self.keep_alive_counter > 1 && (!self.publishing_enabled || (self.publishing_enabled && !p.notifications_available)) { + return UpdateStateResult::new( + HandledState::KeepAlive15, + UpdateStateAction::ReturnKeepAlive, + ); + } else if p.publishing_timer_expired + && self.keep_alive_counter > 1 + && (!self.publishing_enabled + || (self.publishing_enabled && !p.notifications_available)) + { // State #16 self.start_publishing_timer(); self.keep_alive_counter -= 1; - return UpdateStateResult::new(HandledState::KeepAlive16, UpdateStateAction::None); - } else if p.publishing_timer_expired && !p.publishing_req_queued && (self.keep_alive_counter == 1 || (self.keep_alive_counter > 1 && self.publishing_enabled && p.notifications_available)) { + return UpdateStateResult::new( + HandledState::KeepAlive16, + UpdateStateAction::None, + ); + } else if p.publishing_timer_expired + && !p.publishing_req_queued + && (self.keep_alive_counter == 1 + || (self.keep_alive_counter > 1 + && self.publishing_enabled + && p.notifications_available)) + { // State #17 self.start_publishing_timer(); self.state = SubscriptionState::Late; - return UpdateStateResult::new(HandledState::KeepAlive17, UpdateStateAction::None); + return UpdateStateResult::new( + HandledState::KeepAlive17, + UpdateStateAction::None, + ); } } _ => { @@ -637,14 +836,21 @@ impl Subscription { /// /// The function returns a `notifications` and a `more_notifications` boolean to indicate if the notifications /// are available. 
- fn tick_monitored_items(&mut self, now: &DateTimeUtc, address_space: &AddressSpace, publishing_interval_elapsed: bool, resend_data: bool) -> Option { + fn tick_monitored_items( + &mut self, + now: &DateTimeUtc, + address_space: &AddressSpace, + publishing_interval_elapsed: bool, + resend_data: bool, + ) -> Option { let mut triggered_items: BTreeSet = BTreeSet::new(); let mut monitored_item_notifications = Vec::with_capacity(self.monitored_items.len() * 2); for (_, monitored_item) in &mut self.monitored_items { // If this returns true then the monitored item wants to report its notification let monitoring_mode = monitored_item.monitoring_mode(); - match monitored_item.tick(now, address_space, publishing_interval_elapsed, resend_data) { + match monitored_item.tick(now, address_space, publishing_interval_elapsed, resend_data) + { TickResult::ReportValueChanged => { if publishing_interval_elapsed { // If this monitored item has triggered items, then they need to be handled @@ -663,7 +869,9 @@ impl Subscription { } } // Take some / all of the monitored item's pending notifications - if let Some(mut item_notification_messages) = monitored_item.all_notifications() { + if let Some(mut item_notification_messages) = + monitored_item.all_notifications() + { monitored_item_notifications.append(&mut item_notification_messages); } } @@ -725,27 +933,49 @@ impl Subscription { } }); - // Produce a data change notification if !monitored_item_notifications.is_empty() { let next_sequence_number = self.sequence_number.next(); - trace!("Create notification for subscription {}, sequence number {}", self.subscription_id, next_sequence_number); + trace!( + "Create notification for subscription {}, sequence number {}", + self.subscription_id, + next_sequence_number + ); // Collect all datachange notifications - let data_change_notifications = monitored_item_notifications.iter() + let data_change_notifications = monitored_item_notifications + .iter() .filter(|v| matches!(v, Notification::MonitoredItemNotification(_))) - .map(|v| if let Notification::MonitoredItemNotification(v) = v { v.clone() } else { panic!() }) + .map(|v| { + if let Notification::MonitoredItemNotification(v) = v { + v.clone() + } else { + panic!() + } + }) .collect(); // Collect event notifications - let event_notifications = monitored_item_notifications.iter() + let event_notifications = monitored_item_notifications + .iter() .filter(|v| matches!(v, Notification::Event(_))) - .map(|v| if let Notification::Event(v) = v { v.clone() } else { panic!() }) + .map(|v| { + if let Notification::Event(v) = v { + v.clone() + } else { + panic!() + } + }) .collect(); // Make a notification - let notification = NotificationMessage::data_change(next_sequence_number, DateTime::from(now.clone()), data_change_notifications, event_notifications); + let notification = NotificationMessage::data_change( + next_sequence_number, + DateTime::from(now.clone()), + data_change_notifications, + event_notifications, + ); Some(notification) } else { None @@ -854,14 +1084,29 @@ impl Subscription { self.diagnostics_on_drop = diagnostics_on_drop; } - fn validate_triggered_items(&self, monitored_item_id: u32, items: &[u32]) -> (Vec, Vec) { + fn validate_triggered_items( + &self, + monitored_item_id: u32, + items: &[u32], + ) -> (Vec, Vec) { // Monitored items can only trigger on other items in the subscription that exist - let is_good_monitored_item = |i| { self.monitored_items.contains_key(i) && *i != monitored_item_id }; - let is_good_monitored_item_result = |i| { if 
is_good_monitored_item(i) { StatusCode::Good } else { StatusCode::BadMonitoredItemIdInvalid } }; + let is_good_monitored_item = + |i| self.monitored_items.contains_key(i) && *i != monitored_item_id; + let is_good_monitored_item_result = |i| { + if is_good_monitored_item(i) { + StatusCode::Good + } else { + StatusCode::BadMonitoredItemIdInvalid + } + }; // Find monitored items that do or do not exist let results: Vec = items.iter().map(is_good_monitored_item_result).collect(); - let items: Vec = items.iter().filter(|i| is_good_monitored_item(i)).map(|i| *i).collect(); + let items: Vec = items + .iter() + .filter(|i| is_good_monitored_item(i)) + .map(|i| *i) + .collect(); (results, items) } @@ -869,10 +1114,17 @@ impl Subscription { /// Sets the triggering monitored items on a subscription. This function will validate that /// the items to add / remove actually exist and will only pass through existing monitored items /// onto the monitored item itself. - pub(crate) fn set_triggering(&mut self, monitored_item_id: u32, items_to_add: &[u32], items_to_remove: &[u32]) -> Result<(Vec, Vec), StatusCode> { + pub(crate) fn set_triggering( + &mut self, + monitored_item_id: u32, + items_to_add: &[u32], + items_to_remove: &[u32], + ) -> Result<(Vec, Vec), StatusCode> { // Find monitored items that do or do not exist - let (add_results, items_to_add) = self.validate_triggered_items(monitored_item_id, items_to_add); - let (remove_results, items_to_remove) = self.validate_triggered_items(monitored_item_id, items_to_remove); + let (add_results, items_to_add) = + self.validate_triggered_items(monitored_item_id, items_to_add); + let (remove_results, items_to_remove) = + self.validate_triggered_items(monitored_item_id, items_to_remove); if let Some(ref mut monitored_item) = self.monitored_items.get_mut(&monitored_item_id) { // Set the triggering monitored items @@ -884,4 +1136,4 @@ impl Subscription { Err(StatusCode::BadMonitoredItemIdInvalid) } } -} \ No newline at end of file +} diff --git a/server/src/subscriptions/subscriptions.rs b/server/src/subscriptions/subscriptions.rs index e9562bd8c..a05556bbf 100644 --- a/server/src/subscriptions/subscriptions.rs +++ b/server/src/subscriptions/subscriptions.rs @@ -7,16 +7,16 @@ use std::collections::{BTreeMap, VecDeque}; use time; use opcua_types::{ - *, service_types::{NotificationMessage, PublishRequest, PublishResponse, ServiceFault}, status_code::StatusCode, + *, }; use crate::{ address_space::types::AddressSpace, subscriptions::{ - PublishRequestEntry, PublishResponseEntry, subscription::{Subscription, TickReason}, + PublishRequestEntry, PublishResponseEntry, }, }; @@ -56,7 +56,11 @@ pub struct Metrics { impl Subscriptions { pub fn new(max_subscriptions: usize, publish_request_timeout: i64) -> Subscriptions { - let max_publish_requests = if max_subscriptions > 0 { 2 * max_subscriptions } else { 100 }; + let max_publish_requests = if max_subscriptions > 0 { + 2 * max_subscriptions + } else { + 100 + }; Subscriptions { publish_request_queue: VecDeque::with_capacity(max_publish_requests), publish_response_queue: VecDeque::with_capacity(max_publish_requests), @@ -69,11 +73,15 @@ impl Subscriptions { pub(crate) fn metrics(&self) -> Metrics { // Subscriptions - let subscriptions = self.subscriptions().iter().map(|subscription_pair| { - let mut subscription = subscription_pair.1.clone(); - subscription.set_diagnostics_on_drop(false); - subscription - }).collect(); + let subscriptions = self + .subscriptions() + .iter() + .map(|subscription_pair| { + let mut 
subscription = subscription_pair.1.clone(); + subscription.set_diagnostics_on_drop(false); + subscription + }) + .collect(); Metrics { subscriptions, publish_request_queue_len: self.publish_request_queue.len(), @@ -94,7 +102,9 @@ impl Subscriptions { } #[cfg(test)] - pub(crate) fn retransmission_queue(&mut self) -> &mut BTreeMap<(u32, u32), NotificationMessage> { + pub(crate) fn retransmission_queue( + &mut self, + ) -> &mut BTreeMap<(u32, u32), NotificationMessage> { &mut self.retransmission_queue } @@ -121,7 +131,13 @@ impl Subscriptions { /// /// If the queue is full this call will pop the oldest and generate a service fault /// for that before pushing the new one. - pub(crate) fn enqueue_publish_request(&mut self, now: &DateTimeUtc, request_id: u32, request: PublishRequest, address_space: &AddressSpace) -> Result<(), StatusCode> { + pub(crate) fn enqueue_publish_request( + &mut self, + now: &DateTimeUtc, + request_id: u32, + request: PublishRequest, + address_space: &AddressSpace, + ) -> Result<(), StatusCode> { // Check if we have too requests waiting already let max_publish_requests = self.max_publish_requests(); if self.publish_request_queue.len() >= max_publish_requests { @@ -131,7 +147,11 @@ impl Subscriptions { // Enqueue request or return error if self.publish_request_queue.len() >= max_publish_requests { - error!("Too many publish requests {} for capacity {}", self.publish_request_queue.len(), max_publish_requests); + error!( + "Too many publish requests {} for capacity {}", + self.publish_request_queue.len(), + max_publish_requests + ); Err(StatusCode::BadTooManyPublishRequests) } else { // Add to the front of the queue - older items are popped from the back @@ -182,12 +202,24 @@ impl Subscriptions { /// on each in order of priority. In each case this could generate data change notifications. Data change /// notifications will be attached to the next available publish response and queued for sending /// to the client. - pub(crate) fn tick(&mut self, now: &DateTimeUtc, address_space: &AddressSpace, tick_reason: TickReason) -> Result<(), StatusCode> { + pub(crate) fn tick( + &mut self, + now: &DateTimeUtc, + address_space: &AddressSpace, + tick_reason: TickReason, + ) -> Result<(), StatusCode> { let subscription_ids = { // Sort subscriptions by priority - let mut subscription_priority: Vec<(u32, u8)> = self.subscriptions.values().map(|v| (v.subscription_id(), v.priority())).collect(); + let mut subscription_priority: Vec<(u32, u8)> = self + .subscriptions + .values() + .map(|v| (v.subscription_id(), v.priority())) + .collect(); subscription_priority.sort_by(|s1, s2| s1.1.cmp(&s2.1)); - subscription_priority.iter().map(|s| s.0).collect::>() + subscription_priority + .iter() + .map(|s| s.0) + .collect::>() }; // Iterate through all subscriptions. If there is a publish request it will be used to @@ -209,7 +241,11 @@ impl Subscriptions { if let Some(notification_message) = subscription.take_notification() { let publish_request = self.publish_request_queue.pop_back().unwrap(); // Consume the publish request and queue the notification onto the transmission queue - self.transmission_queue.push_front((subscription_id, publish_request, notification_message)); + self.transmission_queue.push_front(( + subscription_id, + publish_request, + notification_message, + )); } else { break; } @@ -228,7 +264,8 @@ impl Subscriptions { // responses. 
while !self.transmission_queue.is_empty() { // Get the oldest notification to send - let (subscription_id, publish_request, notification_message) = self.transmission_queue.pop_back().unwrap(); + let (subscription_id, publish_request, notification_message) = + self.transmission_queue.pop_back().unwrap(); // Search the transmission queue for more notifications from this same subscription let more_notifications = self.more_notifications(subscription_id); @@ -237,10 +274,20 @@ impl Subscriptions { let available_sequence_numbers = self.available_sequence_numbers(subscription_id); // The notification to be sent is now put into the retransmission queue - self.retransmission_queue.insert((subscription_id, notification_message.sequence_number), notification_message.clone()); + self.retransmission_queue.insert( + (subscription_id, notification_message.sequence_number), + notification_message.clone(), + ); // Enqueue a publish response - let response = self.make_publish_response(publish_request, subscription_id, now, notification_message, more_notifications, available_sequence_numbers); + let response = self.make_publish_response( + publish_request, + subscription_id, + now, + notification_message, + more_notifications, + available_sequence_numbers, + ); self.publish_response_queue.push_back(response); } @@ -261,7 +308,8 @@ impl Subscriptions { let publish_request_timeout = self.publish_request_timeout; // Create timeout responses for each expired publish request - let mut expired_publish_responses = VecDeque::with_capacity(self.publish_request_queue.len()); + let mut expired_publish_responses = + VecDeque::with_capacity(self.publish_request_queue.len()); self.publish_request_queue.retain(|ref request| { let request_header = &request.request.request_header; @@ -286,7 +334,8 @@ impl Subscriptions { } }); // Queue responses for each expired request - self.publish_response_queue.append(&mut expired_publish_responses); + self.publish_response_queue + .append(&mut expired_publish_responses); } /// Deletes the acknowledged notifications, returning a list of status code for each according @@ -296,7 +345,10 @@ impl Subscriptions { /// BadSubscriptionIdInvalid - Subscription doesn't exist /// BadSequenceNumberUnknown - Sequence number doesn't exist /// - fn process_subscription_acknowledgements(&mut self, request: &PublishRequest) -> Option> { + fn process_subscription_acknowledgements( + &mut self, + request: &PublishRequest, + ) -> Option> { trace!("Processing subscription acknowledgements"); if let Some(ref subscription_acknowledgements) = request.subscription_acknowledgements { let results = subscription_acknowledgements.iter() @@ -329,7 +381,9 @@ impl Subscriptions { /// subscription id fn more_notifications(&self, subscription_id: u32) -> bool { // At least one match means more notifications - self.transmission_queue.iter().any(|v| v.0 == subscription_id) + self.transmission_queue + .iter() + .any(|v| v.0 == subscription_id) } /// Returns the array of available sequence numbers in the retransmission queue for the specified subscription @@ -338,7 +392,9 @@ impl Subscriptions { None } else { // Find the notifications matching this subscription id in the retransmission queue - let sequence_numbers: Vec = self.retransmission_queue.iter() + let sequence_numbers: Vec = self + .retransmission_queue + .iter() .filter(|&(k, _)| k.0 == subscription_id) .map(|(k, _)| k.1) .collect(); @@ -350,30 +406,50 @@ impl Subscriptions { } } - fn make_publish_response(&self, publish_request: PublishRequestEntry, 
subscription_id: u32, now: &DateTimeUtc, notification_message: NotificationMessage, more_notifications: bool, available_sequence_numbers: Option>) -> PublishResponseEntry { + fn make_publish_response( + &self, + publish_request: PublishRequestEntry, + subscription_id: u32, + now: &DateTimeUtc, + notification_message: NotificationMessage, + more_notifications: bool, + available_sequence_numbers: Option>, + ) -> PublishResponseEntry { let now = DateTime::from(now.clone()); PublishResponseEntry { request_id: publish_request.request_id, response: PublishResponse { - response_header: ResponseHeader::new_timestamped_service_result(now, &publish_request.request.request_header, StatusCode::Good), + response_header: ResponseHeader::new_timestamped_service_result( + now, + &publish_request.request.request_header, + StatusCode::Good, + ), subscription_id, available_sequence_numbers, more_notifications, notification_message, results: publish_request.results, diagnostic_infos: None, - }.into(), + } + .into(), } } /// Finds a notification message in the retransmission queue matching the supplied subscription id /// and sequence number. Returns `BadSubscriptionIdInvalid` or `BadMessageNotAvailable` if a matching /// notification is not found. - pub fn find_notification_message(&self, subscription_id: u32, sequence_number: u32) -> Result { + pub fn find_notification_message( + &self, + subscription_id: u32, + sequence_number: u32, + ) -> Result { // Look for the subscription if let Some(_) = self.subscriptions.get(&subscription_id) { // Look for the sequence number - if let Some(ref notification_message) = self.retransmission_queue.get(&(subscription_id, sequence_number)) { + if let Some(ref notification_message) = self + .retransmission_queue + .get(&(subscription_id, sequence_number)) + { Ok((*notification_message).clone()) } else { Err(StatusCode::BadMessageNotAvailable) @@ -385,7 +461,11 @@ impl Subscriptions { fn remove_notifications(&mut self, sequence_nrs_to_remove: &[(u32, u32)]) { sequence_nrs_to_remove.into_iter().for_each(|n| { - trace!("Removing notification for subscription {}, sequence nr {}", n.0, n.1); + trace!( + "Removing notification for subscription {}, sequence nr {}", + n.0, + n.1 + ); let _ = self.retransmission_queue.remove(&n); }); } @@ -394,7 +474,9 @@ impl Subscriptions { /// is exceeded. 
fn remove_old_unacknowledged_notifications(&mut self) { // Strip out notifications for subscriptions that no longer exist - let sequence_nrs_to_remove = self.retransmission_queue.iter() + let sequence_nrs_to_remove = self + .retransmission_queue + .iter() .filter(|(k, _)| !self.subscriptions.contains_key(&k.0)) .map(|(k, _)| *k) .collect::>(); @@ -405,7 +487,9 @@ impl Subscriptions { let max_retransmission_queue = self.max_publish_requests() * 2; if self.retransmission_queue.len() > max_retransmission_queue { let remove_count = self.retransmission_queue.len() - max_retransmission_queue; - let sequence_nrs_to_remove = self.retransmission_queue.iter() + let sequence_nrs_to_remove = self + .retransmission_queue + .iter() .take(remove_count) .map(|(k, _)| *k) .collect::>(); diff --git a/server/src/tests/address_space.rs b/server/src/tests/address_space.rs index f8e54cb8a..52772c28c 100644 --- a/server/src/tests/address_space.rs +++ b/server/src/tests/address_space.rs @@ -1,8 +1,6 @@ use crate::{ address_space::{ - EventNotifier, - references::Reference, - relative_path::find_node_from_browse_path, + references::Reference, relative_path::find_node_from_browse_path, EventNotifier, }, callbacks, prelude::*, @@ -30,7 +28,12 @@ fn namespaces() { let ns = address_space.register_namespace("urn:test").unwrap(); - assert_eq!(address_space.namespace_index("http://opcfoundation.org/UA/").unwrap(), 0u16); + assert_eq!( + address_space + .namespace_index("http://opcfoundation.org/UA/") + .unwrap(), + 0u16 + ); assert_eq!(address_space.namespace_index("urn:test").unwrap(), ns); // Error assert_eq!(address_space.register_namespace(""), Err(())); @@ -73,7 +76,6 @@ fn find_views_folder() { assert!(node_type.is_some()); } - #[test] fn find_common_nodes() { let address_space = AddressSpace::new(); @@ -139,7 +141,6 @@ fn object_attributes() { assert_eq!(o.display_name(), LocalizedText::new("", "Display01")); } - #[test] fn find_node_by_id() { let address_space = make_sample_address_space(); @@ -156,7 +157,10 @@ fn find_node_by_id() { fn dump_references(references: &Vec) { for r in references { - println!("Referencs - type = {:?}, to = {:?}", r.reference_type, r.target_node); + println!( + "Referencs - type = {:?}, to = {:?}", + r.reference_type, r.target_node + ); } } @@ -165,29 +169,50 @@ fn find_references_by_direction() { let address_space = make_sample_address_space(); let address_space = trace_read_lock_unwrap!(address_space); - let (references, _inverse_ref_idx) = address_space.find_references_by_direction::(&NodeId::objects_folder_id(), BrowseDirection::Forward, None); + let (references, _inverse_ref_idx) = address_space + .find_references_by_direction::( + &NodeId::objects_folder_id(), + BrowseDirection::Forward, + None, + ); dump_references(&references); assert_eq!(references.len(), 3); // Should be same as filtering on None let reference_filter = Some((ReferenceTypeId::References, true)); - let (references, _inverse_ref_idx) = address_space.find_references_by_direction(&NodeId::objects_folder_id(), BrowseDirection::Forward, reference_filter); + let (references, _inverse_ref_idx) = address_space.find_references_by_direction( + &NodeId::objects_folder_id(), + BrowseDirection::Forward, + reference_filter, + ); dump_references(&references); assert_eq!(references.len(), 3); // Only organizes let reference_filter = Some((ReferenceTypeId::Organizes, false)); - let (references, _inverse_ref_idx) = address_space.find_references_by_direction(&NodeId::objects_folder_id(), BrowseDirection::Forward, 
reference_filter); + let (references, _inverse_ref_idx) = address_space.find_references_by_direction( + &NodeId::objects_folder_id(), + BrowseDirection::Forward, + reference_filter, + ); dump_references(&references); assert_eq!(references.len(), 2); // Reverse organises should == 1 (root organises objects) - let (references, _inverse_ref_idx) = address_space.find_references_by_direction(&NodeId::objects_folder_id(), BrowseDirection::Inverse, reference_filter); + let (references, _inverse_ref_idx) = address_space.find_references_by_direction( + &NodeId::objects_folder_id(), + BrowseDirection::Inverse, + reference_filter, + ); dump_references(&references); assert_eq!(references.len(), 1); // Both directions - let (references, inverse_ref_idx) = address_space.find_references_by_direction(&NodeId::objects_folder_id(), BrowseDirection::Both, reference_filter); + let (references, inverse_ref_idx) = address_space.find_references_by_direction( + &NodeId::objects_folder_id(), + BrowseDirection::Both, + reference_filter, + ); dump_references(&references); assert_eq!(references.len(), 3); assert_eq!(inverse_ref_idx, 2); @@ -198,19 +223,26 @@ fn find_references() { let address_space = make_sample_address_space(); let address_space = trace_read_lock_unwrap!(address_space); - let references = address_space.find_references(&NodeId::root_folder_id(), Some((ReferenceTypeId::Organizes, false))); + let references = address_space.find_references( + &NodeId::root_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + ); assert!(references.is_some()); let references = references.as_ref().unwrap(); dump_references(&references); assert_eq!(references.len(), 3); - let references = address_space.find_references::(&NodeId::root_folder_id(), None); + let references = + address_space.find_references::(&NodeId::root_folder_id(), None); assert!(references.is_some()); let references = references.as_ref().unwrap(); dump_references(&references); assert_eq!(references.len(), 4); - let references = address_space.find_references(&NodeId::objects_folder_id(), Some((ReferenceTypeId::Organizes, false))); + let references = address_space.find_references( + &NodeId::objects_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + ); assert!(references.is_some()); let references = references.unwrap(); dump_references(&references); @@ -230,10 +262,16 @@ fn find_inverse_references() { let address_space = trace_read_lock_unwrap!(address_space); //println!("{:#?}", address_space); - let references = address_space.find_inverse_references(&NodeId::root_folder_id(), Some((ReferenceTypeId::Organizes, false))); + let references = address_space.find_inverse_references( + &NodeId::root_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + ); assert!(references.is_none()); - let references = address_space.find_inverse_references(&NodeId::objects_folder_id(), Some((ReferenceTypeId::Organizes, false))); + let references = address_space.find_inverse_references( + &NodeId::objects_folder_id(), + Some((ReferenceTypeId::Organizes, false)), + ); assert!(references.is_some()); let references = references.unwrap(); assert_eq!(references.len(), 1); @@ -246,57 +284,152 @@ fn find_reference_subtypes() { let references = address_space.references(); let reference_types = vec![ - (ReferenceTypeId::References, ReferenceTypeId::HierarchicalReferences), + ( + ReferenceTypeId::References, + ReferenceTypeId::HierarchicalReferences, + ), (ReferenceTypeId::References, ReferenceTypeId::HasChild), (ReferenceTypeId::References, 
ReferenceTypeId::HasSubtype), (ReferenceTypeId::References, ReferenceTypeId::Organizes), (ReferenceTypeId::References, ReferenceTypeId::Aggregates), (ReferenceTypeId::References, ReferenceTypeId::HasProperty), (ReferenceTypeId::References, ReferenceTypeId::HasComponent), - (ReferenceTypeId::References, ReferenceTypeId::HasOrderedComponent), + ( + ReferenceTypeId::References, + ReferenceTypeId::HasOrderedComponent, + ), (ReferenceTypeId::References, ReferenceTypeId::HasEventSource), (ReferenceTypeId::References, ReferenceTypeId::HasNotifier), (ReferenceTypeId::References, ReferenceTypeId::GeneratesEvent), - (ReferenceTypeId::References, ReferenceTypeId::AlwaysGeneratesEvent), + ( + ReferenceTypeId::References, + ReferenceTypeId::AlwaysGeneratesEvent, + ), (ReferenceTypeId::References, ReferenceTypeId::HasEncoding), - (ReferenceTypeId::References, ReferenceTypeId::HasModellingRule), + ( + ReferenceTypeId::References, + ReferenceTypeId::HasModellingRule, + ), (ReferenceTypeId::References, ReferenceTypeId::HasDescription), - (ReferenceTypeId::References, ReferenceTypeId::HasTypeDefinition), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasChild), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasSubtype), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::Organizes), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::Aggregates), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasProperty), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasComponent), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasOrderedComponent), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasEventSource), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasNotifier), + ( + ReferenceTypeId::References, + ReferenceTypeId::HasTypeDefinition, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasChild, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasSubtype, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::Organizes, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::Aggregates, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasProperty, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasComponent, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasOrderedComponent, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasEventSource, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasNotifier, + ), (ReferenceTypeId::HasChild, ReferenceTypeId::Aggregates), (ReferenceTypeId::HasChild, ReferenceTypeId::HasComponent), - (ReferenceTypeId::HasChild, ReferenceTypeId::HasHistoricalConfiguration), + ( + ReferenceTypeId::HasChild, + ReferenceTypeId::HasHistoricalConfiguration, + ), (ReferenceTypeId::HasChild, ReferenceTypeId::HasProperty), - (ReferenceTypeId::HasChild, ReferenceTypeId::HasOrderedComponent), + ( + ReferenceTypeId::HasChild, + ReferenceTypeId::HasOrderedComponent, + ), (ReferenceTypeId::HasChild, ReferenceTypeId::HasSubtype), (ReferenceTypeId::Aggregates, ReferenceTypeId::HasComponent), - (ReferenceTypeId::Aggregates, ReferenceTypeId::HasHistoricalConfiguration), + ( + ReferenceTypeId::Aggregates, + ReferenceTypeId::HasHistoricalConfiguration, + ), (ReferenceTypeId::Aggregates, ReferenceTypeId::HasProperty), - (ReferenceTypeId::Aggregates, ReferenceTypeId::HasOrderedComponent), - 
(ReferenceTypeId::HasComponent, ReferenceTypeId::HasOrderedComponent), - (ReferenceTypeId::HasEventSource, ReferenceTypeId::HasNotifier), - (ReferenceTypeId::HierarchicalReferences, ReferenceTypeId::HasNotifier), - (ReferenceTypeId::References, ReferenceTypeId::NonHierarchicalReferences), - (ReferenceTypeId::NonHierarchicalReferences, ReferenceTypeId::GeneratesEvent), - (ReferenceTypeId::NonHierarchicalReferences, ReferenceTypeId::AlwaysGeneratesEvent), - (ReferenceTypeId::NonHierarchicalReferences, ReferenceTypeId::HasEncoding), - (ReferenceTypeId::NonHierarchicalReferences, ReferenceTypeId::HasModellingRule), - (ReferenceTypeId::NonHierarchicalReferences, ReferenceTypeId::HasDescription), - (ReferenceTypeId::NonHierarchicalReferences, ReferenceTypeId::HasTypeDefinition), - (ReferenceTypeId::GeneratesEvent, ReferenceTypeId::AlwaysGeneratesEvent), + ( + ReferenceTypeId::Aggregates, + ReferenceTypeId::HasOrderedComponent, + ), + ( + ReferenceTypeId::HasComponent, + ReferenceTypeId::HasOrderedComponent, + ), + ( + ReferenceTypeId::HasEventSource, + ReferenceTypeId::HasNotifier, + ), + ( + ReferenceTypeId::HierarchicalReferences, + ReferenceTypeId::HasNotifier, + ), + ( + ReferenceTypeId::References, + ReferenceTypeId::NonHierarchicalReferences, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::GeneratesEvent, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::AlwaysGeneratesEvent, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasEncoding, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasModellingRule, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasDescription, + ), + ( + ReferenceTypeId::NonHierarchicalReferences, + ReferenceTypeId::HasTypeDefinition, + ), + ( + ReferenceTypeId::GeneratesEvent, + ReferenceTypeId::AlwaysGeneratesEvent, + ), ]; // A type should always match itself - assert!(references.reference_type_matches(&ReferenceTypeId::NonHierarchicalReferences.into(), &ReferenceTypeId::NonHierarchicalReferences.into(), true)); - assert!(references.reference_type_matches(&ReferenceTypeId::NonHierarchicalReferences.into(), &ReferenceTypeId::NonHierarchicalReferences.into(), false)); + assert!(references.reference_type_matches( + &ReferenceTypeId::NonHierarchicalReferences.into(), + &ReferenceTypeId::NonHierarchicalReferences.into(), + true + )); + assert!(references.reference_type_matches( + &ReferenceTypeId::NonHierarchicalReferences.into(), + &ReferenceTypeId::NonHierarchicalReferences.into(), + false + )); // Make sure that subtypes match when subtypes are to be compared and doesn't when they should // not be compared. 
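The pairs listed above feed `reference_type_matches`, which asks whether one reference type equals another or, when subtype matching is enabled, is an ancestor of it via HasSubtype. Below is a minimal sketch of that equal-or-ancestor walk, assuming a hand-rolled parent table instead of the crate's address-space lookup; all names in the sketch are illustrative and are not the library's API.

use std::collections::HashMap;

/// Toy check: `actual` matches `expected` if they are equal, or if
/// `include_subtypes` is set and `expected` is reachable by walking up
/// the parent (HasSubtype) chain from `actual`.
fn type_matches(
    parents: &HashMap<&'static str, &'static str>,
    expected: &str,
    actual: &str,
    include_subtypes: bool,
) -> bool {
    if expected == actual {
        return true; // a type always matches itself
    }
    if !include_subtypes {
        return false;
    }
    let mut current = actual;
    while let Some(&parent) = parents.get(current) {
        if parent == expected {
            return true;
        }
        current = parent;
    }
    false
}

fn main() {
    let mut parents = HashMap::new();
    parents.insert("HasSubtype", "HasChild");
    parents.insert("HasChild", "HierarchicalReferences");
    parents.insert("HierarchicalReferences", "References");

    // Mirrors the intent of the assertions in the test: subtypes only match
    // when asked for, and every type matches itself regardless.
    assert!(type_matches(&parents, "References", "HasSubtype", true));
    assert!(!type_matches(&parents, "References", "HasSubtype", false));
    assert!(type_matches(&parents, "Organizes", "Organizes", false));
}

The actual implementation resolves the chain through HasSubtype references held in the address space rather than a fixed table, but the behaviour the test pins down is this same equal-or-ancestor rule.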
@@ -313,7 +446,9 @@ fn find_reference_subtypes() { #[test] fn array_as_variable() { // 1 dimensional array with 100 element - let values = (0..100).map(|i| Variant::Int32(i)).collect::>(); + let values = (0..100) + .map(|i| Variant::Int32(i)) + .collect::>(); // Get the variable node back from the address space, ensure that the ValueRank and ArrayDimensions are correct let node_id = NodeId::new(2, 1); @@ -331,7 +466,9 @@ fn array_as_variable() { fn multi_dimension_array_as_variable() { // 2 dimensional array with 10x10 elements - let values = (0..100).map(|i| Variant::Int32(i)).collect::>(); + let values = (0..100) + .map(|i| Variant::Int32(i)) + .collect::>(); let mda = Array::new_multi(values, vec![10u32, 10u32]); assert!(mda.is_valid()); @@ -352,12 +489,20 @@ fn browse_nodes() { // Test that a node can be found let object_id = ObjectId::RootFolder.into(); - let result = find_node_from_browse_path(&address_space, &object_id, &vec!["Objects".into(), "Sample".into(), "v1".into()]); + let result = find_node_from_browse_path( + &address_space, + &object_id, + &vec!["Objects".into(), "Sample".into(), "v1".into()], + ); let node = result.unwrap(); assert_eq!(node.as_node().browse_name(), QualifiedName::from("v1")); // Test that a non existent node cannot be found - let result = find_node_from_browse_path(&address_space, &object_id, &vec!["Objects".into(), "Sample".into(), "vxxx".into()]); + let result = find_node_from_browse_path( + &address_space, + &object_id, + &vec!["Objects".into(), "Sample".into(), "vxxx".into()], + ); assert!(result.is_err()); assert_eq!(result.unwrap_err(), StatusCode::BadNotFound); } @@ -381,12 +526,20 @@ fn object_builder() { // Verify the variable is there let _o = match address_space.find_node(&node_id).unwrap() { NodeType::Object(o) => o, - _ => panic!() + _ => panic!(), }; // Verify the reference to the objects folder is there - assert!(address_space.has_reference(&ObjectId::ObjectsFolder.into(), &node_id, ReferenceTypeId::Organizes)); - assert!(address_space.has_reference(&node_id, &node_type_id, ReferenceTypeId::HasTypeDefinition)); + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); } #[test] @@ -400,18 +553,21 @@ fn object_type_builder() { let _ot = match address_space.find_node(&node_type_id).unwrap() { NodeType::ObjectType(ot) => ot, - _ => panic!() + _ => panic!(), }; - assert!(address_space.has_reference(&ObjectTypeId::BaseObjectType.into(), &node_type_id, ReferenceTypeId::HasSubtype)); + assert!(address_space.has_reference( + &ObjectTypeId::BaseObjectType.into(), + &node_type_id, + ReferenceTypeId::HasSubtype + )); } #[test] fn variable_builder() { let result = std::panic::catch_unwind(|| { // This should panic - let _v = VariableBuilder::new(&NodeId::null(), "", "") - .build(); + let _v = VariableBuilder::new(&NodeId::null(), "", "").build(); }); assert!(result.is_err()); @@ -439,7 +595,17 @@ fn variable_builder() { assert_eq!(v.value_rank(), 10); assert_eq!(v.array_dimensions().unwrap(), vec![1, 2, 3]); assert_eq!(v.historizing(), true); - assert_eq!(v.value(TimestampsToReturn::Neither, NumericRange::None, &QualifiedName::null(), 0.0).value.unwrap(), Variant::from(999)); + assert_eq!( + v.value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0 + ) + .value + .unwrap(), + Variant::from(999) + ); 
assert_eq!(v.minimum_sampling_interval().unwrap(), 123.0); // Add a variable to the address space @@ -460,7 +626,11 @@ fn variable_builder() { // Verify the variable is there assert!(address_space.find_variable_by_ref(&node_id).is_some()); // Verify the reference to the objects folder is there - assert!(address_space.has_reference(&ObjectId::ObjectsFolder.into(), &node_id, ReferenceTypeId::Organizes)); + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); } #[test] @@ -475,30 +645,40 @@ fn method_builder() { let inserted = MethodBuilder::new(&fn_node_id, "HelloWorld", "HelloWorld") .component_of(object_id.clone()) - .output_args(&mut address_space, &[ - ("Result", DataTypeId::String).into() - ]) + .output_args(&mut address_space, &[("Result", DataTypeId::String).into()]) .callback(Box::new(HelloWorld)) .insert(&mut address_space); assert!(inserted); let method = match address_space.find_node(&fn_node_id).unwrap() { NodeType::Method(m) => m, - _ => panic!() + _ => panic!(), }; assert!(method.has_callback()); - let refs = address_space.find_references(&fn_node_id, Some((ReferenceTypeId::HasProperty, false))).unwrap(); + let refs = address_space + .find_references(&fn_node_id, Some((ReferenceTypeId::HasProperty, false))) + .unwrap(); assert_eq!(refs.len(), 1); - let child = address_space.find_node(&refs.get(0).unwrap().target_node).unwrap(); + let child = address_space + .find_node(&refs.get(0).unwrap().target_node) + .unwrap(); if let NodeType::Variable(v) = child { // verify OutputArguments // verify OutputArguments / Argument value assert_eq!(v.data_type(), DataTypeId::Argument.into()); assert_eq!(v.display_name(), LocalizedText::from("OutputArguments")); - let v = v.value(TimestampsToReturn::Neither, NumericRange::None, &QualifiedName::null(), 0.0).value.unwrap(); + let v = v + .value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0.0, + ) + .value + .unwrap(); if let Variant::Array(array) = v { let v = array.values; assert_eq!(v.len(), 1); @@ -526,7 +706,11 @@ fn method_builder() { struct HelloWorld; impl callbacks::Method for HelloWorld { - fn call(&mut self, _session: &mut Session, _request: &CallMethodRequest) -> Result { + fn call( + &mut self, + _session: &mut Session, + _request: &CallMethodRequest, + ) -> Result { Ok(CallMethodResult { status_code: StatusCode::Good, input_argument_results: Some(vec![StatusCode::Good]), @@ -568,7 +752,9 @@ fn simple_delete_node() { assert!(address_space.find_node(&node_id).is_none()); assert!(address_space.find_node(&root_node).is_some()); assert!(!address_space.has_reference(&root_node, &node_id, ReferenceTypeId::Organizes)); - assert!(!address_space.references().reference_to_node_exists(&node_id)); + assert!(!address_space + .references() + .reference_to_node_exists(&node_id)); } #[test] @@ -593,9 +779,21 @@ fn delete_node() { // Verify the object and refs are there assert!(address_space.find_node(&node_id).is_some()); - assert!(address_space.has_reference(&ObjectId::ObjectsFolder.into(), &node_id, ReferenceTypeId::Organizes)); - assert!(!address_space.has_reference(&node_id, &ObjectId::ObjectsFolder.into(), ReferenceTypeId::Organizes)); - assert!(address_space.has_reference(&node_id, &node_type_id, ReferenceTypeId::HasTypeDefinition)); + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(!address_space.has_reference( + &node_id, + &ObjectId::ObjectsFolder.into(), + 
ReferenceTypeId::Organizes + )); + assert!(address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); // Try one time deleting references, the other time not deleting them. let delete_references = i == 1; @@ -603,14 +801,32 @@ fn delete_node() { if !delete_references { // Deleted the node but not refs assert!(address_space.find_node(&node_id).is_none()); - assert!(address_space.has_reference(&ObjectId::ObjectsFolder.into(), &node_id, ReferenceTypeId::Organizes)); - assert!(address_space.has_reference(&node_id, &node_type_id, ReferenceTypeId::HasTypeDefinition)); + assert!(address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); } else { // Delete the node and the refs assert!(address_space.find_node(&node_id).is_none()); - assert!(!address_space.has_reference(&ObjectId::ObjectsFolder.into(), &node_id, ReferenceTypeId::Organizes)); - assert!(!address_space.has_reference(&node_id, &node_type_id, ReferenceTypeId::HasTypeDefinition)); - assert!(!address_space.references().reference_to_node_exists(&node_id)); + assert!(!address_space.has_reference( + &ObjectId::ObjectsFolder.into(), + &node_id, + ReferenceTypeId::Organizes + )); + assert!(!address_space.has_reference( + &node_id, + &node_type_id, + ReferenceTypeId::HasTypeDefinition + )); + assert!(!address_space + .references() + .reference_to_node_exists(&node_id)); } }); } @@ -621,21 +837,56 @@ fn is_subtype() { // Test subtypes against other and the expected result let subtypes = [ // Positive - (ObjectTypeId::BaseEventType, ObjectTypeId::BaseEventType, true), - (ObjectTypeId::AuditEventType, ObjectTypeId::BaseEventType, true), - (ObjectTypeId::BaseModelChangeEventType, ObjectTypeId::BaseEventType, true), - (ObjectTypeId::AuditHistoryUpdateEventType, ObjectTypeId::BaseEventType, true), - (ObjectTypeId::AuditUrlMismatchEventType, ObjectTypeId::AuditSessionEventType, true), + ( + ObjectTypeId::BaseEventType, + ObjectTypeId::BaseEventType, + true, + ), + ( + ObjectTypeId::AuditEventType, + ObjectTypeId::BaseEventType, + true, + ), + ( + ObjectTypeId::BaseModelChangeEventType, + ObjectTypeId::BaseEventType, + true, + ), + ( + ObjectTypeId::AuditHistoryUpdateEventType, + ObjectTypeId::BaseEventType, + true, + ), + ( + ObjectTypeId::AuditUrlMismatchEventType, + ObjectTypeId::AuditSessionEventType, + true, + ), // Negative // BaseEventType is not a subtype of AuditEventType - (ObjectTypeId::BaseEventType, ObjectTypeId::AuditEventType, false), + ( + ObjectTypeId::BaseEventType, + ObjectTypeId::AuditEventType, + false, + ), // DeviceFailureEventType is not a subtype of ProgressEventType (different branches) - (ObjectTypeId::DeviceFailureEventType, ObjectTypeId::ProgressEventType, false), + ( + ObjectTypeId::DeviceFailureEventType, + ObjectTypeId::ProgressEventType, + false, + ), // SystemEventType is not a subtype of ProgressEventType (peers) - (ObjectTypeId::SystemEventType, ObjectTypeId::ProgressEventType, false), + ( + ObjectTypeId::SystemEventType, + ObjectTypeId::ProgressEventType, + false, + ), ]; subtypes.iter().for_each(|v| { - println!("Expecting {:?} to be a subtype of {:?} == {:?}", v.0, v.1, v.2); + println!( + "Expecting {:?} to be a subtype of {:?} == {:?}", + v.0, v.1, v.2 + ); assert_eq!(address_space.is_subtype(&v.0.into(), &v.1.into()), v.2); }); } @@ -645,7 +896,9 @@ fn hierarchical_references() { let address_space = 
AddressSpace::new(); // Try with root - let refs = address_space.find_hierarchical_references(&NodeId::root_folder_id()).unwrap(); + let refs = address_space + .find_hierarchical_references(&NodeId::root_folder_id()) + .unwrap(); assert_eq!(refs.len(), 3); assert!(refs.contains(&NodeId::objects_folder_id())); assert!(refs.contains(&NodeId::views_folder_id())); @@ -659,9 +912,13 @@ fn hierarchical_references() { assert!(refs.contains(&VariableId::Server_ServerCapabilities_ServerProfileArray.into())); assert!(refs.contains(&VariableId::Server_ServerCapabilities_LocaleIdArray.into())); assert!(refs.contains(&VariableId::Server_ServerCapabilities_MinSupportedSampleRate.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxBrowseContinuationPoints.into())); + assert!( + refs.contains(&VariableId::Server_ServerCapabilities_MaxBrowseContinuationPoints.into()) + ); assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxQueryContinuationPoints.into())); - assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxHistoryContinuationPoints.into())); + assert!( + refs.contains(&VariableId::Server_ServerCapabilities_MaxHistoryContinuationPoints.into()) + ); assert!(refs.contains(&VariableId::Server_ServerCapabilities_SoftwareCertificates.into())); assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxArrayLength.into())); assert!(refs.contains(&VariableId::Server_ServerCapabilities_MaxStringLength.into())); @@ -670,4 +927,4 @@ fn hierarchical_references() { assert!(refs.contains(&ObjectId::Server_ServerCapabilities_ModellingRules.into())); assert!(refs.contains(&ObjectId::Server_ServerCapabilities_AggregateFunctions.into())); assert!(refs.contains(&ObjectId::HistoryServerCapabilities.into())); -} \ No newline at end of file +} diff --git a/server/src/tests/events.rs b/server/src/tests/events.rs index 54aaacc6f..a205ad121 100644 --- a/server/src/tests/events.rs +++ b/server/src/tests/events.rs @@ -1,19 +1,15 @@ use std::collections::HashSet; use opcua_types::{ - AttributeId, LocalizedText, node_ids::ReferenceTypeId, NodeId, ObjectId, ObjectTypeId, operand::{ContentFilterBuilder, Operand}, QualifiedName, service_types::ContentFilterElement, - UAString, - DataTypeId, - VariableTypeId, - Variant, + node_ids::ReferenceTypeId, + operand::{ContentFilterBuilder, Operand}, + service_types::ContentFilterElement, + AttributeId, DataTypeId, LocalizedText, NodeId, ObjectId, ObjectTypeId, QualifiedName, + UAString, VariableTypeId, Variant, }; use crate::{ - address_space::{ - AddressSpace, - object_type::ObjectTypeBuilder, - variable::VariableBuilder, - }, + address_space::{object_type::ObjectTypeBuilder, variable::VariableBuilder, AddressSpace}, events::event::{BaseEventType, Event}, events::event_filter, events::operator, @@ -40,10 +36,18 @@ impl Event for TestEventType { match self.base.raise(address_space) { Ok(node_id) => { let property_id = NodeId::next_numeric(2); - self.add_property(&node_id, property_id, "Foo", "Foo", DataTypeId::Int32, self.foo, address_space); + self.add_property( + &node_id, + property_id, + "Foo", + "Foo", + DataTypeId::Int32, + self.foo, + address_space, + ); Ok(node_id) } - err => err + err => err, } } } @@ -51,18 +55,36 @@ impl Event for TestEventType { base_event_impl!(TestEventType, base); impl TestEventType { - fn new(node_id: R, browse_name: S, display_name: T, parent_node: U, source_node: V, foo: i32) -> Self - where R: Into, - S: Into, - T: Into, - U: Into, - V: Into { + fn new( + node_id: R, + browse_name: S, + display_name: T, + 
parent_node: U, + source_node: V, + foo: i32, + ) -> Self + where + R: Into, + S: Into, + T: Into, + U: Into, + V: Into, + { let event_type_id = Self::event_type_id(); let source_node: NodeId = source_node.into(); Self { - base: BaseEventType::new_now(node_id, event_type_id, browse_name, display_name, parent_node) - .source_node(source_node.clone()) - .message(LocalizedText::from(format!("A Test event from {:?}", source_node))), + base: BaseEventType::new_now( + node_id, + event_type_id, + browse_name, + display_name, + parent_node, + ) + .source_node(source_node.clone()) + .message(LocalizedText::from(format!( + "A Test event from {:?}", + source_node + ))), foo, } } @@ -72,9 +94,21 @@ impl TestEventType { } } -fn create_event(address_space: &mut AddressSpace, node_id: NodeId, source_machine_id: &NodeId, foo: i32) { +fn create_event( + address_space: &mut AddressSpace, + node_id: NodeId, + source_machine_id: &NodeId, + foo: i32, +) { let event_name = format!("Event{}", foo); - let mut event = TestEventType::new(&node_id, event_name.clone(), event_name, NodeId::objects_folder_id(), source_machine_id, foo); + let mut event = TestEventType::new( + &node_id, + event_name.clone(), + event_name, + NodeId::objects_folder_id(), + source_machine_id, + foo, + ); let _ = event.raise(address_space); } @@ -100,13 +134,19 @@ fn address_space() -> AddressSpace { .insert(&mut address_space); // Create an event of that type - create_event(&mut address_space, event_id(), &ObjectId::Server.into(), 100); + create_event( + &mut address_space, + event_id(), + &ObjectId::Server.into(), + 100, + ); address_space } fn do_operator_test(f: T) - where T: FnOnce(&AddressSpace, &NodeId, &mut HashSet, &Vec) +where + T: FnOnce(&AddressSpace, &NodeId, &mut HashSet, &Vec), { opcua_console_logging::init(); let mut used_elements = HashSet::new(); @@ -124,15 +164,36 @@ fn test_eq() { do_operator_test(|address_space, object_id, used_elements, elements| { // Simple test, compare two values of the same kind let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::eq(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::eq( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::eq(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::eq( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(10), Operand::literal(11)]; - let result = operator::eq(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::eq( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); }); } @@ -142,15 +203,36 @@ fn test_lt() { do_operator_test(|address_space, object_id, used_elements, elements| { // Simple test, compare two values of the same kind let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::lt(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::lt( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(10), 
Operand::literal(10)]; - let result = operator::lt(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::lt( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(11), Operand::literal(10)]; - let result = operator::lt(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::lt( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); }); } @@ -160,15 +242,36 @@ fn test_lte() { do_operator_test(|address_space, object_id, used_elements, elements| { // Simple test, compare two values of the same kind let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::lte(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::lte( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::lte(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::lte( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(11), Operand::literal(10)]; - let result = operator::lte(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::lte( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); }); } @@ -178,15 +281,36 @@ fn test_gt() { do_operator_test(|address_space, object_id, used_elements, elements| { // Simple test, compare two values of the same kind let operands = [Operand::literal(11), Operand::literal(10)]; - let result = operator::gt(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::gt( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = operator::gt(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::gt( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::gt(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::gt( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); }); } @@ -196,15 +320,36 @@ fn test_gte() { do_operator_test(|address_space, object_id, used_elements, elements| { // Simple test, compare two values of the same kind let operands = &[Operand::literal(11), Operand::literal(10)]; - let result = operator::gte(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::gte( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(10), Operand::literal(10)]; - let result = 
operator::gte(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::gte( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(9), Operand::literal(10)]; - let result = operator::gte(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::gte( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); }); } @@ -213,31 +358,73 @@ fn test_gte() { fn test_not() { do_operator_test(|address_space, object_id, used_elements, elements| { let operands = &[Operand::literal(false)]; - let result = operator::not(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::not( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(true)]; - let result = operator::not(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::not( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); // String let operands = &[Operand::literal("0")]; - let result = operator::not(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::not( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); // String(2) let operands = &[Operand::literal("true")]; - let result = operator::not(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::not( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); // Invalid - Double let operands = &[Operand::literal(99.9)]; - let result = operator::not(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::not( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); // Invalid - Int32 let operands = &[Operand::literal(1)]; - let result = operator::not(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::not( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); }); } @@ -246,29 +433,84 @@ fn test_not() { fn test_between() { do_operator_test(|address_space, object_id, used_elements, elements| { // Test operator with some ranges and mix of types with implicit conversion - let operands = &[Operand::literal(12), Operand::literal(12), Operand::literal(13)]; - let result = operator::between(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let operands = &[ + Operand::literal(12), + Operand::literal(12), + Operand::literal(13), + ]; + let result = operator::between( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); - let operands = &[Operand::literal(13), Operand::literal(12), Operand::literal(13)]; - let result = operator::between(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let 
operands = &[ + Operand::literal(13), + Operand::literal(12), + Operand::literal(13), + ]; + let result = operator::between( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); - let operands = &[Operand::literal(12.3), Operand::literal(12.0), Operand::literal(12.4)]; - let result = operator::between(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let operands = &[ + Operand::literal(12.3), + Operand::literal(12.0), + Operand::literal(12.4), + ]; + let result = operator::between( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); - let operands = &[Operand::literal(11.99), Operand::literal(12.0), Operand::literal(13.0)]; - let result = operator::between(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let operands = &[ + Operand::literal(11.99), + Operand::literal(12.0), + Operand::literal(13.0), + ]; + let result = operator::between( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); - let operands = &[Operand::literal(13.0001), Operand::literal(12.0), Operand::literal(13.0)]; - let result = operator::between(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let operands = &[ + Operand::literal(13.0001), + Operand::literal(12.0), + Operand::literal(13.0), + ]; + let result = operator::between( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); -// let operands = &[Operand::literal("12.5"), Operand::literal(12), Operand::literal(13)]); -// let result = operator::between(&operands[..], used_elements, elements, address_space).unwrap(); -// assert_eq!(result, Variant::Boolean(true)); + // let operands = &[Operand::literal("12.5"), Operand::literal(12), Operand::literal(13)]); + // let result = operator::between(&operands[..], used_elements, elements, address_space).unwrap(); + // assert_eq!(result, Variant::Boolean(true)); }) } @@ -276,39 +518,102 @@ fn test_between() { fn test_and() { do_operator_test(|address_space, object_id, used_elements, elements| { let operands = &[Operand::literal(true), Operand::literal(true)]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(false), Operand::literal(true)]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(true), Operand::literal(false)]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(false), Operand::literal(false)]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + 
&object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(true), Operand::literal(())]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); let operands = &[Operand::literal(()), Operand::literal(true)]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); let operands = &[Operand::literal(false), Operand::literal(())]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(()), Operand::literal(false)]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(()), Operand::literal(())]; - let result = operator::and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); }) } @@ -317,67 +622,136 @@ fn test_and() { fn test_or() { do_operator_test(|address_space, object_id, used_elements, elements| { let operands = &[Operand::literal(true), Operand::literal(true)]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(true), Operand::literal(false)]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(false), Operand::literal(true)]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(false), Operand::literal(false)]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(true), Operand::literal(())]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + 
address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(()), Operand::literal(true)]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(true)); let operands = &[Operand::literal(false), Operand::literal(())]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); let operands = &[Operand::literal(()), Operand::literal(false)]; - let result = operator::or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Empty); }) } - #[test] fn test_in_list() { do_operator_test(|address_space, object_id, used_elements, elements| { let operands = &[Operand::literal(10), Operand::literal(false)]; - let result = operator::in_list(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::in_list( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); let operands = &[Operand::literal(true), Operand::literal(false)]; - let result = operator::in_list(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::in_list( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::Boolean(false)); /* - let operands = &[Operand::literal("true"), Operand::literal(true)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(99), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(()), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(true)); - - let operands = &[Operand::literal(33), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; - let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); - assert_eq!(result, Variant::Boolean(false)); - */ + let operands = &[Operand::literal("true"), Operand::literal(true)]; + let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); + assert_eq!(result, Variant::Boolean(true)); + + let operands = &[Operand::literal(99), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; + let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); + assert_eq!(result, Variant::Boolean(true)); + + let operands = &[Operand::literal(()), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; + let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); + 
assert_eq!(result, Variant::Boolean(true)); + + let operands = &[Operand::literal(33), Operand::literal(11), Operand::literal(()), Operand::literal(99.0)]; + let result = operator::in_list(&operands[..], used_elements, elements, address_space).unwrap(); + assert_eq!(result, Variant::Boolean(false)); + */ }) } @@ -385,7 +759,14 @@ fn test_in_list() { fn test_bitwise_or() { do_operator_test(|address_space, object_id, used_elements, elements| { let operands = &[Operand::literal(0xff00u16), Operand::literal(0x00ffu16)]; - let result = operator::bitwise_or(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::bitwise_or( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::UInt16(0xffff)); }) } @@ -394,7 +775,14 @@ fn test_bitwise_or() { fn test_bitwise_and() { do_operator_test(|address_space, object_id, used_elements, elements| { let operands = &[Operand::literal(0xf00fu16), Operand::literal(0x00ffu16)]; - let result = operator::bitwise_and(&object_id, &operands[..], used_elements, elements, address_space).unwrap(); + let result = operator::bitwise_and( + &object_id, + &operands[..], + used_elements, + elements, + address_space, + ) + .unwrap(); assert_eq!(result, Variant::UInt16(0x000f)); }) } @@ -423,7 +811,10 @@ fn test_where_clause() { // Like operator let f = ContentFilterBuilder::new() - .is_like(Operand::literal("Hello world"), Operand::literal("[Hh]ello w%")) + .is_like( + Operand::literal("Hello world"), + Operand::literal("[Hh]ello w%"), + ) .build(); let result = event_filter::evaluate_where_clause(&object_id, &f, &address_space); assert_eq!(result.unwrap(), true.into()); @@ -451,11 +842,21 @@ fn test_where_clause() { (NodeId::objects_folder_id(), "Event100/Foo/Bar", 100, false), (event_id(), "", 100, false), ]; - expected.into_iter().for_each(|(node_id, browse_path, value_to_compare, expected)| { - let f = ContentFilterBuilder::new() - .is_eq(Operand::simple_attribute(ReferenceTypeId::Organizes, browse_path, AttributeId::Value, UAString::null()), Operand::literal(value_to_compare)) - .build(); - let result = event_filter::evaluate_where_clause(&node_id, &f, &address_space); - assert_eq!(result.unwrap(), expected.into()); - }); + expected + .into_iter() + .for_each(|(node_id, browse_path, value_to_compare, expected)| { + let f = ContentFilterBuilder::new() + .is_eq( + Operand::simple_attribute( + ReferenceTypeId::Organizes, + browse_path, + AttributeId::Value, + UAString::null(), + ), + Operand::literal(value_to_compare), + ) + .build(); + let result = event_filter::evaluate_where_clause(&node_id, &f, &address_space); + assert_eq!(result.unwrap(), expected.into()); + }); } diff --git a/server/src/tests/mod.rs b/server/src/tests/mod.rs index decb24283..24e3a7fe9 100644 --- a/server/src/tests/mod.rs +++ b/server/src/tests/mod.rs @@ -5,32 +5,24 @@ use std::sync::{Arc, RwLock}; use chrono; use time; -use opcua_types::{ - *, - status_code::StatusCode, -}; -use opcua_crypto::*; use opcua_core::{ - config::Config, - comms::secure_channel::SecureChannel, - supported_message::SupportedMessage, + comms::secure_channel::SecureChannel, config::Config, supported_message::SupportedMessage, }; +use opcua_crypto::*; +use opcua_types::{status_code::StatusCode, *}; use crate::{ - address_space::{ - address_space::*, - variable::*, - }, + address_space::{address_space::*, variable::*}, + builder::ServerBuilder, + config::ServerConfig, session::*, subscriptions::*, - 
config::ServerConfig, - builder::ServerBuilder, }; mod address_space; +mod events; mod services; mod subscriptions; -mod events; fn make_test_file(filename: &str) -> PathBuf { let mut path = std::env::temp_dir(); @@ -50,13 +42,20 @@ fn add_sample_vars_to_address_space(address_space: Arc>) { let ns = address_space.register_namespace("urn:test").unwrap(); // Create a sample folder under objects folder - let sample_folder_id = address_space.add_folder("Sample", "Sample", &NodeId::objects_folder_id()).unwrap(); + let sample_folder_id = address_space + .add_folder("Sample", "Sample", &NodeId::objects_folder_id()) + .unwrap(); // Add some variables to our sample folder let vars = vec![ Variable::new(&NodeId::new(ns, "v1"), "v1", "v1", 30i32), Variable::new(&NodeId::new(ns, 300), "v2", "v2", true), - Variable::new(&NodeId::new(ns, "v3"), "v3", "v3", UAString::from("Hello world")), + Variable::new( + &NodeId::new(ns, "v3"), + "v3", + "v3", + UAString::from("Hello world"), + ), Variable::new(&NodeId::new(ns, "v4"), "v4", "v4", 100.123f64), ]; let _ = address_space.add_variables(vars, &sample_folder_id); @@ -96,7 +95,12 @@ pub fn server_config_invalid() { // Insert a nonexistent user config = ServerBuilder::new_anonymous("foo").config(); - config.endpoints.get_mut("none").unwrap().user_token_ids.insert("hello".to_string()); + config + .endpoints + .get_mut("none") + .unwrap() + .user_token_ids + .insert("hello".to_string()); assert_eq!(config.is_valid(), false); } @@ -143,14 +147,19 @@ pub fn expired_publish_requests() { // Expire requests, see which expire session.expire_stale_publish_requests(&now_plus_5s); - // The > 30s timeout hint request should be expired and the other should remain // Remain { let publish_request_queue = session.subscriptions_mut().publish_request_queue(); assert_eq!(publish_request_queue.len(), 1); - assert_eq!(publish_request_queue[0].request.request_header.request_handle, 1000); + assert_eq!( + publish_request_queue[0] + .request + .request_header + .request_handle, + 1000 + ); } // Expire @@ -161,9 +170,12 @@ pub fn expired_publish_requests() { let r1 = &publish_response_queue[0]; if let SupportedMessage::ServiceFault(ref response_header) = r1.response { assert_eq!(response_header.response_header.request_handle, 2000); - assert_eq!(response_header.response_header.service_result, StatusCode::BadTimeout); + assert_eq!( + response_header.response_header.service_result, + StatusCode::BadTimeout + ); } else { panic!("Expected service faults for timed out publish requests") } } -} \ No newline at end of file +} diff --git a/server/src/tests/services/attribute.rs b/server/src/tests/services/attribute.rs index f8f1fe22e..f4fe2f41b 100644 --- a/server/src/tests/services/attribute.rs +++ b/server/src/tests/services/attribute.rs @@ -2,10 +2,7 @@ use chrono::Duration; use opcua_types::{Variant, WriteMask}; -use crate::{ - address_space::AccessLevel, - services::attribute::AttributeService, -}; +use crate::{address_space::AccessLevel, services::attribute::AttributeService}; use super::*; @@ -18,7 +15,11 @@ fn read_value(node_id: &NodeId, attribute_id: AttributeId) -> ReadValueId { } } -fn read_value_range(node_id: &NodeId, attribute_id: AttributeId, index_range: UAString) -> ReadValueId { +fn read_value_range( + node_id: &NodeId, + attribute_id: AttributeId, + index_range: UAString, +) -> ReadValueId { ReadValueId { node_id: node_id.clone(), attribute_id: attribute_id as u32, @@ -27,7 +28,11 @@ fn read_value_range(node_id: &NodeId, attribute_id: AttributeId, index_range: UA } } -fn 
read_value_encoding(node_id: &NodeId, attribute_id: AttributeId, data_encoding: QualifiedName) -> ReadValueId { +fn read_value_encoding( + node_id: &NodeId, + attribute_id: AttributeId, + data_encoding: QualifiedName, +) -> ReadValueId { ReadValueId { node_id: node_id.clone(), attribute_id: attribute_id as u32, @@ -41,17 +46,30 @@ fn node_ids(address_space: Arc>) -> Vec { let mut address_space = trace_write_lock_unwrap!(address_space); // Remove read access to [3] for a test below let node = address_space.find_node_mut(&node_ids[3]).unwrap(); - let r = node.as_mut_node().set_attribute(AttributeId::UserAccessLevel, Variant::from(0u8)); + let r = node + .as_mut_node() + .set_attribute(AttributeId::UserAccessLevel, Variant::from(0u8)); assert!(r.is_ok()); node_ids } fn do_attribute_service_test(f: F) - where F: FnOnce(Arc>, Arc>, Arc>, &AttributeService) +where + F: FnOnce( + Arc>, + Arc>, + Arc>, + &AttributeService, + ), { // Set up some nodes let st = ServiceTest::new(); - f(st.server_state.clone(), st.session.clone(), st.address_space.clone(), &AttributeService::new()) + f( + st.server_state.clone(), + st.session.clone(), + st.address_space.clone(), + &AttributeService::new(), + ) } #[test] @@ -103,25 +121,40 @@ fn read() { assert!(results[1].server_timestamp.is_none()); // 3. a variable without the required attribute - assert_eq!(results[2].status.as_ref().unwrap(), &StatusCode::BadAttributeIdInvalid); + assert_eq!( + results[2].status.as_ref().unwrap(), + &StatusCode::BadAttributeIdInvalid + ); assert!(results[2].source_timestamp.is_none()); assert!(results[2].server_timestamp.is_none()); // 4. a variable with no read access - assert_eq!(results[3].status.as_ref().unwrap(), &StatusCode::BadNotReadable); + assert_eq!( + results[3].status.as_ref().unwrap(), + &StatusCode::BadNotReadable + ); assert!(results[3].source_timestamp.is_none()); assert!(results[3].server_timestamp.is_none()); // 5. Non existent - assert_eq!(results[4].status.as_ref().unwrap(), &StatusCode::BadNodeIdUnknown); + assert_eq!( + results[4].status.as_ref().unwrap(), + &StatusCode::BadNodeIdUnknown + ); assert!(results[4].source_timestamp.is_none()); assert!(results[4].server_timestamp.is_none()); // 6. Index range on a non-value - assert_eq!(results[5].status.as_ref().unwrap(), &StatusCode::BadIndexRangeNoData); + assert_eq!( + results[5].status.as_ref().unwrap(), + &StatusCode::BadIndexRangeNoData + ); // 7. 
Invalid encoding - assert_eq!(results[6].status.as_ref().unwrap(), &StatusCode::BadDataEncodingInvalid); + assert_eq!( + results[6].status.as_ref().unwrap(), + &StatusCode::BadDataEncodingInvalid + ); } // OTHER POTENTIAL TESTS @@ -142,9 +175,7 @@ fn read_invalid_timestamps() { let node_ids = node_ids(address_space.clone()); // Read a non existent variable - let nodes_to_read = vec![ - read_value(&node_ids[0], AttributeId::Value), - ]; + let nodes_to_read = vec![read_value(&node_ids[0], AttributeId::Value)]; let request = ReadRequest { request_header: make_request_header(), max_age: 0f64, @@ -155,7 +186,10 @@ fn read_invalid_timestamps() { let response = ats.read(server_state, session, address_space, &request); let response = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadTimestampsToReturnInvalid); + assert_eq!( + response.response_header.service_result, + StatusCode::BadTimestampsToReturnInvalid + ); }); } @@ -168,8 +202,15 @@ fn write_value(node_id: &NodeId, attribute_id: AttributeId, value: DataValue) -> } } -fn write_value_index_range(node_id: &NodeId, attribute_id: AttributeId, index_range: V, value: DataValue) -> WriteValue - where V: Into { +fn write_value_index_range( + node_id: &NodeId, + attribute_id: AttributeId, + index_range: V, + value: DataValue, +) -> WriteValue +where + V: Into, +{ WriteValue { node_id: node_id.clone(), attribute_id: attribute_id as u32, @@ -179,7 +220,13 @@ fn write_value_index_range(node_id: &NodeId, attribute_id: AttributeId, index } // Boiler plate helper makes a request and grabs a response -fn write_request(server_state: Arc>, session: Arc>, address_space: Arc>, ats: &AttributeService, nodes_to_write: Vec) -> WriteResponse { +fn write_request( + server_state: Arc>, + session: Arc>, + address_space: Arc>, + ats: &AttributeService, + nodes_to_write: Vec, +) -> WriteResponse { let request = WriteRequest { request_header: make_request_header(), nodes_to_write: Some(nodes_to_write), @@ -190,11 +237,19 @@ fn write_request(server_state: Arc>, session: Arc(address_space: Arc>, node_id: &NodeId, f: F) where F: FnOnce(&Variant) { +fn validate_variable_value(address_space: Arc>, node_id: &NodeId, f: F) +where + F: FnOnce(&Variant), +{ let address_space = trace_read_lock_unwrap!(address_space); let node = address_space.find_node(&node_id).unwrap(); if let NodeType::Variable(node) = node { - let value = node.value(TimestampsToReturn::Neither, NumericRange::None, &QualifiedName::null(), 0.); + let value = node.value( + TimestampsToReturn::Neither, + NumericRange::None, + &QualifiedName::null(), + 0., + ); f(&value.value.unwrap()); } else { panic!(); @@ -220,21 +275,38 @@ fn write() { } 2 => { // Remove write access to the value by setting access level to 0 - let _ = node.as_mut_node().set_attribute(AttributeId::UserAccessLevel, Variant::from(0u8)).unwrap(); + let _ = node + .as_mut_node() + .set_attribute(AttributeId::UserAccessLevel, Variant::from(0u8)) + .unwrap(); } 6 => { node.as_mut_node().set_write_mask(WriteMask::ACCESS_LEVEL); } _ => { // Write access - let _ = node.as_mut_node().set_attribute(AttributeId::AccessLevel, Variant::from(AccessLevel::CURRENT_WRITE.bits())).unwrap(); - let _ = node.as_mut_node().set_attribute(AttributeId::UserAccessLevel, Variant::from(UserAccessLevel::CURRENT_WRITE.bits())).unwrap(); + let _ = node + .as_mut_node() + .set_attribute( + AttributeId::AccessLevel, + Variant::from(AccessLevel::CURRENT_WRITE.bits()), + ) + .unwrap(); + let _ = node + .as_mut_node() 
+ .set_attribute( + AttributeId::UserAccessLevel, + Variant::from(UserAccessLevel::CURRENT_WRITE.bits()), + ) + .unwrap(); } } } // change HasEncoding node with write access so response can be compared to HasChild which will be left alone - let node = address_space.find_node_mut(&ReferenceTypeId::HasEncoding.into()).unwrap(); + let node = address_space + .find_node_mut(&ReferenceTypeId::HasEncoding.into()) + .unwrap(); node.as_mut_node().set_write_mask(WriteMask::IS_ABSTRACT); node_ids @@ -246,26 +318,60 @@ fn write() { // This is a cross section of variables and other kinds of nodes that we want to write to let nodes_to_write = vec![ // 1. a variable value - write_value(&node_ids[0], AttributeId::Value, DataValue::new_now(100 as i32)), + write_value( + &node_ids[0], + AttributeId::Value, + DataValue::new_now(100 as i32), + ), // 2. a variable with a bad attribute (IsAbstract doesn't exist on a var) - write_value(&node_ids[1], AttributeId::IsAbstract, DataValue::new_now(true)), + write_value( + &node_ids[1], + AttributeId::IsAbstract, + DataValue::new_now(true), + ), // 3. a variable value which has no write access - write_value(&node_ids[2], AttributeId::Value, DataValue::new_now(200 as i32)), + write_value( + &node_ids[2], + AttributeId::Value, + DataValue::new_now(200 as i32), + ), // 4. a node of some kind other than variable - write_value(&ReferenceTypeId::HasEncoding.into(), AttributeId::IsAbstract, DataValue::new_now(false)), + write_value( + &ReferenceTypeId::HasEncoding.into(), + AttributeId::IsAbstract, + DataValue::new_now(false), + ), // 5. a node with some kind other than variable with no write mask - write_value(&ReferenceTypeId::HasChild.into(), AttributeId::IsAbstract, DataValue::new_now(false)), + write_value( + &ReferenceTypeId::HasChild.into(), + AttributeId::IsAbstract, + DataValue::new_now(false), + ), // 6. a non existent variable - write_value(&NodeId::new(2, "vxxx"), AttributeId::Value, DataValue::new_now(100i32)), + write_value( + &NodeId::new(2, "vxxx"), + AttributeId::Value, + DataValue::new_now(100i32), + ), // 7. wrong type for attribute - write_value(&node_ids[6], AttributeId::AccessLevel, DataValue::new_now(-1i8)), + write_value( + &node_ids[6], + AttributeId::AccessLevel, + DataValue::new_now(-1i8), + ), // 8. 
a data value with no value write_value(&node_ids[7], AttributeId::Value, data_value_empty), ]; let nodes_to_write_len = nodes_to_write.len(); - let response = write_request(server_state, session, address_space.clone(), ats, nodes_to_write); + let response = write_request( + server_state, + session, + address_space.clone(), + ats, + nodes_to_write, + ); let results = response.results.unwrap(); assert_eq!(results.len(), nodes_to_write_len); @@ -311,30 +417,36 @@ fn write_bytestring_to_byte_array() { } let bytes = ByteString::from(vec![0x1u8, 0x2u8, 0x3u8, 0x4u8]); - let nodes_to_write = vec![ - write_value(&node_id, AttributeId::Value, DataValue::new_now(bytes)), - ]; + let nodes_to_write = vec![write_value( + &node_id, + AttributeId::Value, + DataValue::new_now(bytes), + )]; // Do a write - let response = write_request(server_state, session, address_space.clone(), ats, nodes_to_write); + let response = write_request( + server_state, + session, + address_space.clone(), + ats, + nodes_to_write, + ); let results = response.results.unwrap(); // Expect the write to have succeeded assert_eq!(results[0], StatusCode::Good); // Test the node expecting it to be an array with 4 Byte values - validate_variable_value(address_space, &node_id, |value| { - match value { - Variant::Array(array) => { - let values = &array.values; - assert_eq!(values.len(), 4); - assert_eq!(values[0], Variant::Byte(0x1u8)); - assert_eq!(values[1], Variant::Byte(0x2u8)); - assert_eq!(values[2], Variant::Byte(0x3u8)); - assert_eq!(values[3], Variant::Byte(0x4u8)); - } - _ => panic!() + validate_variable_value(address_space, &node_id, |value| match value { + Variant::Array(array) => { + let values = &array.values; + assert_eq!(values.len(), 4); + assert_eq!(values[0], Variant::Byte(0x1u8)); + assert_eq!(values[1], Variant::Byte(0x2u8)); + assert_eq!(values[2], Variant::Byte(0x3u8)); + assert_eq!(values[3], Variant::Byte(0x4u8)); } + _ => panic!(), }); }); } @@ -347,32 +459,53 @@ fn write_index_range() { let node_id_1 = NodeId::next_numeric(2); let node_id_2 = NodeId::next_numeric(2); - [&node_id_1, &node_id_2].iter().enumerate().for_each(|(i, node_id)| { - let mut address_space = trace_write_lock_unwrap!(address_space); - let _ = VariableBuilder::new(node_id, var_name(i), "") - .data_type(DataTypeId::Byte) - .value_rank(1) - .value(vec![0u8; 16]) - .organized_by(ObjectId::RootFolder) - .writable() - .insert(&mut address_space); - }); + [&node_id_1, &node_id_2] + .iter() + .enumerate() + .for_each(|(i, node_id)| { + let mut address_space = trace_write_lock_unwrap!(address_space); + let _ = VariableBuilder::new(node_id, var_name(i), "") + .data_type(DataTypeId::Byte) + .value_rank(1) + .value(vec![0u8; 16]) + .organized_by(ObjectId::RootFolder) + .writable() + .insert(&mut address_space); + }); let index: usize = 12; let index_expected_value = 73u8; let index_bytes = Variant::from(vec![index_expected_value]); let (range_min, range_max) = (4 as usize, 12 as usize); - let range_bytes = vec![0x1u8, 0x2u8, 0x3u8, 0x4u8, 0x5u8, 0x6u8, 0x7u8, 0x8u8, 0x9u8]; + let range_bytes = vec![ + 0x1u8, 0x2u8, 0x3u8, 0x4u8, 0x5u8, 0x6u8, 0x7u8, 0x8u8, 0x9u8, + ]; let range_value = Variant::from(range_bytes.clone()); let nodes_to_write = vec![ - write_value_index_range(&node_id_1, AttributeId::Value, format!("{}", index), DataValue::new_now(index_bytes)), - write_value_index_range(&node_id_2, AttributeId::Value, format!("{}:{}", range_min, range_max), DataValue::new_now(range_value)), + write_value_index_range( + &node_id_1, + AttributeId::Value, + 
format!("{}", index), + DataValue::new_now(index_bytes), + ), + write_value_index_range( + &node_id_2, + AttributeId::Value, + format!("{}:{}", range_min, range_max), + DataValue::new_now(range_value), + ), ]; // Do a write - let response = write_request(server_state, session, address_space.clone(), ats, nodes_to_write); + let response = write_request( + server_state, + session, + address_space.clone(), + ats, + nodes_to_write, + ); let results = response.results.unwrap(); // Expect the write to have succeeded @@ -386,11 +519,15 @@ fn write_index_range() { assert_eq!(values.len(), 16); values.iter().enumerate().for_each(|(i, v)| { // Only one element set, others should not be set - let expected = if i == index { index_expected_value } else { 0u8 }; + let expected = if i == index { + index_expected_value + } else { + 0u8 + }; assert_eq!(*v, Variant::Byte(expected)); }); } - _ => panic!() + _ => panic!(), } }); @@ -409,7 +546,7 @@ fn write_index_range() { assert_eq!(*v, Variant::Byte(expected)); }); } - _ => panic!() + _ => panic!(), } }); }); @@ -417,16 +554,26 @@ fn write_index_range() { // #[test] fn write_null_value() { /* Write an empty variant to a value and see that it is allowed */} - struct DataProvider; impl HistoricalDataProvider for DataProvider { - fn read_raw_modified_details(&self, _address_space: Arc>, _request: ReadRawModifiedDetails, _timestamps_to_return: TimestampsToReturn, _release_continuation_points: bool, _nodes_to_read: &[HistoryReadValueId]) -> Result, StatusCode> { + fn read_raw_modified_details( + &self, + _address_space: Arc>, + _request: ReadRawModifiedDetails, + _timestamps_to_return: TimestampsToReturn, + _release_continuation_points: bool, + _nodes_to_read: &[HistoryReadValueId], + ) -> Result, StatusCode> { info!("DataProvider's read_raw_modified_details"); Ok(DataProvider::historical_read_result()) } - fn delete_raw_modified_details(&self, _address_space: Arc>, _request: DeleteRawModifiedDetails) -> Result, StatusCode> { + fn delete_raw_modified_details( + &self, + _address_space: Arc>, + _request: DeleteRawModifiedDetails, + ) -> Result, StatusCode> { info!("DataProvider's delete_raw_modified_details"); Ok(vec![StatusCode::Good]) } @@ -434,24 +581,21 @@ impl HistoricalDataProvider for DataProvider { impl DataProvider { pub fn historical_read_result() -> Vec { - vec![ - HistoryReadResult { - status_code: StatusCode::Good, - continuation_point: ByteString::null(), - history_data: ExtensionObject::null(), - }] + vec![HistoryReadResult { + status_code: StatusCode::Good, + continuation_point: ByteString::null(), + history_data: ExtensionObject::null(), + }] } } fn nodes_to_read() -> Vec { - vec![ - HistoryReadValueId { - node_id: NodeId::new(2, "test"), - index_range: UAString::null(), - data_encoding: QualifiedName::null(), // TODO - continuation_point: ByteString::null(), - } - ] + vec![HistoryReadValueId { + node_id: NodeId::new(2, "test"), + index_range: UAString::null(), + data_encoding: QualifiedName::null(), // TODO + continuation_point: ByteString::null(), + }] } fn read_raw_modified_details() -> ReadRawModifiedDetails { @@ -475,7 +619,10 @@ fn history_read_nothing_to_do_1() { // Register a history data provider // Send a valid read details command but with no nodes to read let read_raw_modified_details = read_raw_modified_details(); - let history_read_details = ExtensionObject::from_encodable(ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, &read_raw_modified_details); + let history_read_details = ExtensionObject::from_encodable( + 
ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, + &read_raw_modified_details, + ); let request = HistoryReadRequest { request_header: make_request_header(), history_read_details, @@ -483,8 +630,14 @@ fn history_read_nothing_to_do_1() { release_continuation_points: true, nodes_to_read: None, }; - let response: ServiceFault = supported_message_as!(ats.history_read(server_state, session, address_space.clone(), &request), ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); + let response: ServiceFault = supported_message_as!( + ats.history_read(server_state, session, address_space.clone(), &request), + ServiceFault + ); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); }); } @@ -499,8 +652,14 @@ fn history_read_nothing_history_operation_invalid() { release_continuation_points: true, nodes_to_read: Some(nodes_to_read()), }; - let response: ServiceFault = supported_message_as!(ats.history_read(server_state, session, address_space, &request), ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadHistoryOperationInvalid); + let response: ServiceFault = supported_message_as!( + ats.history_read(server_state, session, address_space, &request), + ServiceFault + ); + assert_eq!( + response.response_header.service_result, + StatusCode::BadHistoryOperationInvalid + ); }); } @@ -515,7 +674,10 @@ fn history_read_nothing_data_provider() { // Call ReadRawModifiedDetails on the registered callback and expect a call back let read_raw_modified_details = read_raw_modified_details(); - let history_read_details = ExtensionObject::from_encodable(ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, &read_raw_modified_details); + let history_read_details = ExtensionObject::from_encodable( + ObjectId::ReadRawModifiedDetails_Encoding_DefaultBinary, + &read_raw_modified_details, + ); let request = HistoryReadRequest { request_header: make_request_header(), history_read_details, @@ -523,7 +685,10 @@ fn history_read_nothing_data_provider() { release_continuation_points: true, nodes_to_read: Some(nodes_to_read()), }; - let response: HistoryReadResponse = supported_message_as!(ats.history_read(server_state, session, address_space, &request), HistoryReadResponse); + let response: HistoryReadResponse = supported_message_as!( + ats.history_read(server_state, session, address_space, &request), + HistoryReadResponse + ); let expected_read_result = DataProvider::historical_read_result(); assert_eq!(response.results, Some(expected_read_result)); }); @@ -549,8 +714,14 @@ fn history_update_nothing_to_do_1() { request_header: make_request_header(), history_update_details: None, }; - let response: ServiceFault = supported_message_as!(ats.history_update(server_state, session, address_space, &request), ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); + let response: ServiceFault = supported_message_as!( + ats.history_update(server_state, session, address_space, &request), + ServiceFault + ); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); }); } @@ -562,8 +733,14 @@ fn history_update_nothing_to_do_2() { request_header: make_request_header(), history_update_details: Some(vec![]), }; - let response: ServiceFault = supported_message_as!(ats.history_update(server_state, session, address_space, &request), ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); + let response: 
ServiceFault = supported_message_as!( + ats.history_update(server_state, session, address_space, &request), + ServiceFault + ); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); }); } @@ -575,7 +752,10 @@ fn history_update_history_operation_invalid() { request_header: make_request_header(), history_update_details: Some(vec![ExtensionObject::null()]), }; - let response: HistoryUpdateResponse = supported_message_as!(ats.history_update(server_state, session, address_space, &request), HistoryUpdateResponse); + let response: HistoryUpdateResponse = supported_message_as!( + ats.history_update(server_state, session, address_space, &request), + HistoryUpdateResponse + ); let results = response.results.unwrap(); assert_eq!(results.len(), 1); @@ -591,17 +771,26 @@ fn history_update_history_operation_unsupported() { let delete_raw_modified_details = delete_raw_modified_details(); // Unsupported operation (everything by default) - let history_update_details = ExtensionObject::from_encodable(ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, &delete_raw_modified_details); + let history_update_details = ExtensionObject::from_encodable( + ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, + &delete_raw_modified_details, + ); let request = HistoryUpdateRequest { request_header: make_request_header(), history_update_details: Some(vec![history_update_details]), }; - let response: HistoryUpdateResponse = supported_message_as!(ats.history_update(server_state, session, address_space, &request), HistoryUpdateResponse); + let response: HistoryUpdateResponse = supported_message_as!( + ats.history_update(server_state, session, address_space, &request), + HistoryUpdateResponse + ); let results = response.results.unwrap(); assert_eq!(results.len(), 1); let result1 = &results[0]; - assert_eq!(result1.status_code, StatusCode::BadHistoryOperationUnsupported); + assert_eq!( + result1.status_code, + StatusCode::BadHistoryOperationUnsupported + ); }); } @@ -618,16 +807,22 @@ fn history_update_data_provider() { let delete_raw_modified_details = delete_raw_modified_details(); // Supported operation - let history_update_details = ExtensionObject::from_encodable(ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, &delete_raw_modified_details); + let history_update_details = ExtensionObject::from_encodable( + ObjectId::DeleteRawModifiedDetails_Encoding_DefaultBinary, + &delete_raw_modified_details, + ); let request = HistoryUpdateRequest { request_header: make_request_header(), history_update_details: Some(vec![history_update_details]), }; - let response: HistoryUpdateResponse = supported_message_as!(ats.history_update(server_state, session, address_space, &request), HistoryUpdateResponse); + let response: HistoryUpdateResponse = supported_message_as!( + ats.history_update(server_state, session, address_space, &request), + HistoryUpdateResponse + ); let results = response.results.unwrap(); assert_eq!(results.len(), 1); let result1 = &results[0]; assert_eq!(result1.status_code, StatusCode::Good); }); -} \ No newline at end of file +} diff --git a/server/src/tests/services/discovery.rs b/server/src/tests/services/discovery.rs index d542b90d3..311a22379 100644 --- a/server/src/tests/services/discovery.rs +++ b/server/src/tests/services/discovery.rs @@ -5,7 +5,8 @@ use crate::services::discovery::DiscoveryService; use super::*; fn do_discovery_service_test(f: F) - where F: FnOnce(Arc>, Arc>, &DiscoveryService) +where + F: FnOnce(Arc>, Arc>, &DiscoveryService), { let 
st = ServiceTest::new(); let (server_state, session) = st.get_server_state_and_session(); @@ -102,7 +103,9 @@ fn discovery_test() { assert!(result.endpoints.is_none()); // Enter the binary transport profile and expect the endpoints - let profile_uris = vec![UAString::from("http://opcfoundation.org/UA-Profile/Transport/uatcp-uasc-uabinary")]; + let profile_uris = vec![UAString::from( + "http://opcfoundation.org/UA-Profile/Transport/uatcp-uasc-uabinary", + )]; let request = GetEndpointsRequest { request_header: make_request_header(), endpoint_url: endpoint_url.clone(), @@ -115,4 +118,4 @@ fn discovery_test() { assert!(!endpoints.is_empty()) } }); -} \ No newline at end of file +} diff --git a/server/src/tests/services/method.rs b/server/src/tests/services/method.rs index 7db42b8a4..072196701 100644 --- a/server/src/tests/services/method.rs +++ b/server/src/tests/services/method.rs @@ -5,15 +5,19 @@ use opcua_types::{ }; use crate::services::{ - method::MethodService, - monitored_item::MonitoredItemService, - subscription::SubscriptionService, + method::MethodService, monitored_item::MonitoredItemService, subscription::SubscriptionService, }; use super::*; fn do_method_service_test(f: F) - where F: FnOnce(Arc>, Arc>, Arc>, &MethodService) +where + F: FnOnce( + Arc>, + Arc>, + Arc>, + &MethodService, + ), { let st = ServiceTest::new(); @@ -24,8 +28,15 @@ fn do_method_service_test(f: F) f(server_state, session, address_space, &s); } -fn new_call_method_request(object_id: S, method_id: T, input_arguments: Option>) -> CallMethodRequest - where S: Into, T: Into { +fn new_call_method_request( + object_id: S, + method_id: T, + input_arguments: Option>, +) -> CallMethodRequest +where + S: Into, + T: Into, +{ CallMethodRequest { object_id: object_id.into(), method_id: method_id.into(), @@ -45,7 +56,14 @@ fn create_subscription_request() -> CreateSubscriptionRequest { } } -fn create_monitored_items_request(subscription_id: u32, client_handle: u32, node_id: T) -> CreateMonitoredItemsRequest where T: 'static + Into { +fn create_monitored_items_request( + subscription_id: u32, + client_handle: u32, + node_id: T, +) -> CreateMonitoredItemsRequest +where + T: 'static + Into, +{ CreateMonitoredItemsRequest { request_header: RequestHeader::dummy(), subscription_id, @@ -70,11 +88,22 @@ fn create_monitored_items_request(subscription_id: u32, client_handle: u32, n } /// This is a convenience for tests -fn call_single(s: &MethodService, server_state: Arc>, session: Arc>, address_space: Arc>, request: CallMethodRequest) -> Result { - let response = s.call(server_state, session, address_space, &CallRequest { - request_header: RequestHeader::dummy(), - methods_to_call: Some(vec![request]), - }); +fn call_single( + s: &MethodService, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + request: CallMethodRequest, +) -> Result { + let response = s.call( + server_state, + session, + address_space, + &CallRequest { + request_header: RequestHeader::dummy(), + methods_to_call: Some(vec![request]), + }, + ); let response: CallResponse = supported_message_as!(response, CallResponse); Ok(response.results.unwrap().remove(0)) } @@ -83,7 +112,8 @@ fn call_single(s: &MethodService, server_state: Arc>, sessio fn call_getmonitoreditems_invalid_object_id() { do_method_service_test(|server_state, session, address_space, s| { // Call without a valid object id - let request = new_call_method_request(NodeId::null(), MethodId::Server_GetMonitoredItems, None); + let request = + new_call_method_request(NodeId::null(), 
MethodId::Server_GetMonitoredItems, None); let response = call_single(s, server_state, session, address_space, request).unwrap(); assert_eq!(response.status_code, StatusCode::BadNodeIdUnknown); }); @@ -103,7 +133,8 @@ fn call_getmonitoreditems_invalid_method_id() { fn call_getmonitoreditems_no_args() { do_method_service_test(|server_state, session, address_space, s| { // Call without args - let request = new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, None); + let request = + new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, None); let response = call_single(s, server_state, session, address_space, request).unwrap(); assert_eq!(response.status_code, StatusCode::BadArgumentsMissing); }); @@ -114,7 +145,11 @@ fn call_getmonitoreditems_too_many_args() { do_method_service_test(|server_state, session, address_space, s| { // Call with too many args let args: Vec = vec![100.into(), 100.into()]; - let request = new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, Some(args)); + let request = new_call_method_request( + ObjectId::Server, + MethodId::Server_GetMonitoredItems, + Some(args), + ); let response = call_single(s, server_state, session, address_space, request).unwrap(); assert_eq!(response.status_code, StatusCode::BadTooManyArguments); }); @@ -125,7 +160,11 @@ fn call_getmonitoreditems_incorrect_args() { do_method_service_test(|server_state, session, address_space, s| { // Call with incorrect arg let args: Vec = vec![100u8.into()]; - let request = new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, Some(args)); + let request = new_call_method_request( + ObjectId::Server, + MethodId::Server_GetMonitoredItems, + Some(args), + ); let response = call_single(s, server_state, session, address_space, request).unwrap(); assert_eq!(response.status_code, StatusCode::BadInvalidArgument); }); @@ -136,7 +175,11 @@ fn call_getmonitoreditems_invalid_subscription_id() { do_method_service_test(|server_state, session, address_space, s| { // Call with invalid subscription id let args: Vec = vec![100u32.into()]; - let request = new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, Some(args)); + let request = new_call_method_request( + ObjectId::Server, + MethodId::Server_GetMonitoredItems, + Some(args), + ); let response = call_single(s, server_state, session, address_space, request).unwrap(); assert_eq!(response.status_code, StatusCode::BadSubscriptionIdInvalid); }); @@ -153,21 +196,47 @@ fn call_getmonitoreditems() { // Create a subscription with some monitored items where client handle is distinct let subscription_id = { let request = create_subscription_request(); - let response: CreateSubscriptionResponse = supported_message_as!(ss.create_subscription(server_state.clone(), session.clone(), &request), CreateSubscriptionResponse); + let response: CreateSubscriptionResponse = supported_message_as!( + ss.create_subscription(server_state.clone(), session.clone(), &request), + CreateSubscriptionResponse + ); response.subscription_id }; // Create a monitored item let monitored_item_id = { - let request = create_monitored_items_request(subscription_id, 999, VariableId::Server_ServerStatus_CurrentTime); - let response: CreateMonitoredItemsResponse = supported_message_as!(mis.create_monitored_items(server_state.clone(), session.clone(), address_space.clone(), &request), CreateMonitoredItemsResponse); + let request = create_monitored_items_request( + subscription_id, + 999, + 
VariableId::Server_ServerStatus_CurrentTime, + ); + let response: CreateMonitoredItemsResponse = supported_message_as!( + mis.create_monitored_items( + server_state.clone(), + session.clone(), + address_space.clone(), + &request + ), + CreateMonitoredItemsResponse + ); response.results.unwrap()[0].monitored_item_id }; // Call to get monitored items and verify handles let args: Vec = vec![subscription_id.into()]; - let request = new_call_method_request(ObjectId::Server, MethodId::Server_GetMonitoredItems, Some(args)); - let response = call_single(s, server_state.clone(), session.clone(), address_space.clone(), request).unwrap(); + let request = new_call_method_request( + ObjectId::Server, + MethodId::Server_GetMonitoredItems, + Some(args), + ); + let response = call_single( + s, + server_state.clone(), + session.clone(), + address_space.clone(), + request, + ) + .unwrap(); assert_eq!(response.status_code, StatusCode::Good); // There should be two output args, each a vector of u32 @@ -199,17 +268,32 @@ fn call_resend_data() { do_method_service_test(|server_state, session, address_space, s| { // Call without a valid object id { - let request = new_call_method_request(NodeId::null(), MethodId::Server_ResendData, None); - let response = call_single(s, server_state.clone(), session.clone(), address_space.clone(), request).unwrap(); + let request = + new_call_method_request(NodeId::null(), MethodId::Server_ResendData, None); + let response = call_single( + s, + server_state.clone(), + session.clone(), + address_space.clone(), + request, + ) + .unwrap(); assert_eq!(response.status_code, StatusCode::BadNodeIdUnknown); } - // Call with invalid subscription id { let args: Vec = vec![100u32.into()]; - let request = new_call_method_request(ObjectId::Server, MethodId::Server_ResendData, Some(args)); - let response = call_single(s, server_state.clone(), session.clone(), address_space.clone(), request).unwrap(); + let request = + new_call_method_request(ObjectId::Server, MethodId::Server_ResendData, Some(args)); + let response = call_single( + s, + server_state.clone(), + session.clone(), + address_space.clone(), + request, + ) + .unwrap(); assert_eq!(response.status_code, StatusCode::BadSubscriptionIdInvalid); } @@ -221,14 +305,25 @@ fn call_resend_data() { // Create a subscription with some monitored items where client handle is distinct let subscription_id = { let request = create_subscription_request(); - let response: CreateSubscriptionResponse = supported_message_as!(ss.create_subscription(server_state.clone(), session.clone(), &request), CreateSubscriptionResponse); + let response: CreateSubscriptionResponse = supported_message_as!( + ss.create_subscription(server_state.clone(), session.clone(), &request), + CreateSubscriptionResponse + ); response.subscription_id }; // Call to get monitored items and verify handles let args: Vec = vec![subscription_id.into()]; - let request = new_call_method_request(ObjectId::Server, MethodId::Server_ResendData, Some(args)); - let response = call_single(s, server_state.clone(), session.clone(), address_space.clone(), request).unwrap(); + let request = + new_call_method_request(ObjectId::Server, MethodId::Server_ResendData, Some(args)); + let response = call_single( + s, + server_state.clone(), + session.clone(), + address_space.clone(), + request, + ) + .unwrap(); assert_eq!(response.status_code, StatusCode::Good); } }); diff --git a/server/src/tests/services/mod.rs b/server/src/tests/services/mod.rs index b23729918..9cb4fb010 100644 --- 
a/server/src/tests/services/mod.rs +++ b/server/src/tests/services/mod.rs @@ -1,14 +1,11 @@ use std::sync::{Arc, RwLock}; use crate::{ + comms::transport::Transport, prelude::*, - state::ServerState, + services::{monitored_item::MonitoredItemService, subscription::SubscriptionService}, session::Session, - services::{ - monitored_item::MonitoredItemService, - subscription::SubscriptionService, - }, - comms::transport::Transport, + state::ServerState, tests::*, }; @@ -51,41 +48,68 @@ fn make_request_header() -> RequestHeader { } } -fn var_name(idx: usize) -> String { format!("v{}", idx) } +fn var_name(idx: usize) -> String { + format!("v{}", idx) +} -fn var_node_id(idx: usize) -> NodeId { NodeId::new(1, var_name(idx)) } +fn var_node_id(idx: usize) -> NodeId { + NodeId::new(1, var_name(idx)) +} -fn add_many_vars_to_address_space(address_space: Arc>, vars_to_add: usize) -> (NodeId, Vec) { +fn add_many_vars_to_address_space( + address_space: Arc>, + vars_to_add: usize, +) -> (NodeId, Vec) { let mut address_space = trace_write_lock_unwrap!(address_space); // Create a sample folder under objects folder - let sample_folder_id = address_space.add_folder("Many Vars", "Many Vars", &NodeId::objects_folder_id()).unwrap(); + let sample_folder_id = address_space + .add_folder("Many Vars", "Many Vars", &NodeId::objects_folder_id()) + .unwrap(); // Add as a bunch of sequential vars to the folder - let node_ids: Vec = (0..vars_to_add).map(|i| { - let node_id = var_node_id(i); - let _ = VariableBuilder::new(&node_id, var_name(i),"") - .data_type(DataTypeId::Int32) - .organized_by(&sample_folder_id) - .value(i as i32) - .insert(&mut address_space); - node_id - }).collect(); + let node_ids: Vec = (0..vars_to_add) + .map(|i| { + let node_id = var_node_id(i); + let _ = VariableBuilder::new(&node_id, var_name(i), "") + .data_type(DataTypeId::Int32) + .organized_by(&sample_folder_id) + .value(i as i32) + .insert(&mut address_space); + node_id + }) + .collect(); (sample_folder_id, node_ids) } /// A helper that sets up a subscription service test fn do_subscription_service_test(f: T) - where T: FnOnce(Arc>, Arc>, Arc>, SubscriptionService, MonitoredItemService) +where + T: FnOnce( + Arc>, + Arc>, + Arc>, + SubscriptionService, + MonitoredItemService, + ), { let st = ServiceTest::new(); add_many_vars_to_address_space(st.address_space.clone(), 100); - f(st.server_state.clone(), st.session.clone(), st.address_space.clone(), SubscriptionService::new(), MonitoredItemService::new()); + f( + st.server_state.clone(), + st.session.clone(), + st.address_space.clone(), + SubscriptionService::new(), + MonitoredItemService::new(), + ); } /// Creates a blank subscription request -fn create_subscription_request(max_keep_alive_count: u32, lifetime_count: u32) -> CreateSubscriptionRequest { +fn create_subscription_request( + max_keep_alive_count: u32, + lifetime_count: u32, +) -> CreateSubscriptionRequest { CreateSubscriptionRequest { request_header: RequestHeader::dummy(), requested_publishing_interval: 100f64, @@ -98,25 +122,33 @@ fn create_subscription_request(max_keep_alive_count: u32, lifetime_count: u32) - } /// Creates a monitored item request -fn create_monitored_items_request(subscription_id: u32, node_id: Vec) -> CreateMonitoredItemsRequest - where T: Into { - let items_to_create = Some(node_id.into_iter() - .enumerate() - .map(|i| { - let node_id: NodeId = i.1.into(); - MonitoredItemCreateRequest { - item_to_monitor: node_id.into(), - monitoring_mode: MonitoringMode::Reporting, - requested_parameters: MonitoringParameters 
{ - client_handle: i.0 as u32, - sampling_interval: 0.1, - filter: ExtensionObject::null(), - queue_size: 1, - discard_oldest: true, - }, - } - }) - .collect::>()); +fn create_monitored_items_request( + subscription_id: u32, + node_id: Vec, +) -> CreateMonitoredItemsRequest +where + T: Into, +{ + let items_to_create = Some( + node_id + .into_iter() + .enumerate() + .map(|i| { + let node_id: NodeId = i.1.into(); + MonitoredItemCreateRequest { + item_to_monitor: node_id.into(), + monitoring_mode: MonitoringMode::Reporting, + requested_parameters: MonitoringParameters { + client_handle: i.0 as u32, + sampling_interval: 0.1, + filter: ExtensionObject::null(), + queue_size: 1, + discard_oldest: true, + }, + } + }) + .collect::>(), + ); CreateMonitoredItemsRequest { request_header: RequestHeader::dummy(), subscription_id, @@ -125,7 +157,6 @@ fn create_monitored_items_request(subscription_id: u32, node_id: Vec) -> C } } - pub mod attribute; pub mod discovery; pub mod method; diff --git a/server/src/tests/services/monitored_item.rs b/server/src/tests/services/monitored_item.rs index 669beee4c..2198f7091 100644 --- a/server/src/tests/services/monitored_item.rs +++ b/server/src/tests/services/monitored_item.rs @@ -1,20 +1,17 @@ -use std::ops::Add; use std::collections::HashSet; +use std::ops::Add; use chrono::{self, Utc}; +use super::*; use crate::{ prelude::*, + services::{monitored_item::MonitoredItemService, subscription::SubscriptionService}, subscriptions::{ - subscription::{TickReason, SubscriptionState}, monitored_item::*, - }, - services::{ - subscription::SubscriptionService, - monitored_item::MonitoredItemService, + subscription::{SubscriptionState, TickReason}, }, }; -use super::*; fn test_var_node_id() -> NodeId { NodeId::new(1, 1) @@ -45,7 +42,13 @@ fn make_address_space() -> AddressSpace { address_space } -fn make_create_request(sampling_interval: Duration, queue_size: u32, node_id: NodeId, attribute_id: AttributeId, filter: ExtensionObject) -> MonitoredItemCreateRequest { +fn make_create_request( + sampling_interval: Duration, + queue_size: u32, + node_id: NodeId, + attribute_id: AttributeId, + filter: ExtensionObject, +) -> MonitoredItemCreateRequest { MonitoredItemCreateRequest { item_to_monitor: ReadValueId { node_id, @@ -64,55 +67,117 @@ fn make_create_request(sampling_interval: Duration, queue_size: u32, node_id: No } } -fn make_create_request_data_change_filter(sampling_interval: Duration, queue_size: u32) -> MonitoredItemCreateRequest { +fn make_create_request_data_change_filter( + sampling_interval: Duration, + queue_size: u32, +) -> MonitoredItemCreateRequest { // Encode a filter to an extension object - let filter = ExtensionObject::from_encodable(ObjectId::DataChangeFilter_Encoding_DefaultBinary, &DataChangeFilter { - trigger: DataChangeTrigger::StatusValueTimestamp, - deadband_type: DeadbandType::None as u32, - deadband_value: 0f64, - }); - make_create_request(sampling_interval, queue_size, test_var_node_id(), AttributeId::Value, filter) + let filter = ExtensionObject::from_encodable( + ObjectId::DataChangeFilter_Encoding_DefaultBinary, + &DataChangeFilter { + trigger: DataChangeTrigger::StatusValueTimestamp, + deadband_type: DeadbandType::None as u32, + deadband_value: 0f64, + }, + ); + make_create_request( + sampling_interval, + queue_size, + test_var_node_id(), + AttributeId::Value, + filter, + ) } -fn make_create_request_event_filter(sampling_interval: Duration, queue_size: u32) -> MonitoredItemCreateRequest { - let filter = 
ExtensionObject::from_encodable(ObjectId::EventFilter_Encoding_DefaultBinary, &EventFilter { - where_clause: ContentFilter { - elements: None +fn make_create_request_event_filter( + sampling_interval: Duration, + queue_size: u32, +) -> MonitoredItemCreateRequest { + let filter = ExtensionObject::from_encodable( + ObjectId::EventFilter_Encoding_DefaultBinary, + &EventFilter { + where_clause: ContentFilter { elements: None }, + select_clauses: Some(vec![ + SimpleAttributeOperand::new( + ObjectTypeId::BaseEventType, + "EventId", + AttributeId::Value, + UAString::null(), + ), + SimpleAttributeOperand::new( + ObjectTypeId::BaseEventType, + "SourceNode", + AttributeId::Value, + UAString::null(), + ), + ]), }, - select_clauses: Some(vec![ - SimpleAttributeOperand::new(ObjectTypeId::BaseEventType, "EventId", AttributeId::Value, UAString::null()), - SimpleAttributeOperand::new(ObjectTypeId::BaseEventType, "SourceNode", AttributeId::Value, UAString::null()), - ]), - }); - make_create_request(sampling_interval, queue_size, test_object_node_id(), AttributeId::EventNotifier, filter) + ); + make_create_request( + sampling_interval, + queue_size, + test_object_node_id(), + AttributeId::EventNotifier, + filter, + ) } -fn set_monitoring_mode(session: Arc>, subscription_id: u32, monitored_item_id: u32, monitoring_mode: MonitoringMode, mis: &MonitoredItemService) { +fn set_monitoring_mode( + session: Arc>, + subscription_id: u32, + monitored_item_id: u32, + monitoring_mode: MonitoringMode, + mis: &MonitoredItemService, +) { let request = SetMonitoringModeRequest { request_header: RequestHeader::dummy(), subscription_id, monitoring_mode, monitored_item_ids: Some(vec![monitored_item_id]), }; - let response: SetMonitoringModeResponse = supported_message_as!(mis.set_monitoring_mode(session, &request), SetMonitoringModeResponse); + let response: SetMonitoringModeResponse = supported_message_as!( + mis.set_monitoring_mode(session, &request), + SetMonitoringModeResponse + ); let results = response.results.unwrap(); assert_eq!(results.len(), 1); assert_eq!(results[0], StatusCode::Good); } -fn set_triggering(session: Arc>, subscription_id: u32, monitored_item_id: u32, links_to_add: &[u32], links_to_remove: &[u32], mis: &MonitoredItemService) -> (Option>, Option>) { +fn set_triggering( + session: Arc>, + subscription_id: u32, + monitored_item_id: u32, + links_to_add: &[u32], + links_to_remove: &[u32], + mis: &MonitoredItemService, +) -> (Option>, Option>) { let request = SetTriggeringRequest { request_header: RequestHeader::dummy(), subscription_id, triggering_item_id: monitored_item_id, - links_to_add: if links_to_add.is_empty() { None } else { Some(links_to_add.to_vec()) }, - links_to_remove: if links_to_remove.is_empty() { None } else { Some(links_to_remove.to_vec()) }, + links_to_add: if links_to_add.is_empty() { + None + } else { + Some(links_to_add.to_vec()) + }, + links_to_remove: if links_to_remove.is_empty() { + None + } else { + Some(links_to_remove.to_vec()) + }, }; - let response: SetTriggeringResponse = supported_message_as!(mis.set_triggering(session, &request), SetTriggeringResponse); + let response: SetTriggeringResponse = + supported_message_as!(mis.set_triggering(session, &request), SetTriggeringResponse); (response.add_results, response.remove_results) } -fn publish_request(now: &DateTimeUtc, session: Arc>, address_space: Arc>, ss: &SubscriptionService) { +fn publish_request( + now: &DateTimeUtc, + session: Arc>, + address_space: Arc>, + ss: &SubscriptionService, +) { let request_id = 1001; let 
request = PublishRequest { request_header: RequestHeader::dummy(), @@ -124,34 +189,65 @@ fn publish_request(now: &DateTimeUtc, session: Arc>, address_spa session.subscriptions_mut().publish_request_queue().clear(); } - let response = ss.async_publish(now, session.clone(), address_space.clone(), request_id, &request); + let response = ss.async_publish( + now, + session.clone(), + address_space.clone(), + request_id, + &request, + ); assert!(response.is_none()); let mut session = trace_write_lock_unwrap!(session); - assert!(!session.subscriptions_mut().publish_request_queue().is_empty()); + assert!(!session + .subscriptions_mut() + .publish_request_queue() + .is_empty()); } fn publish_response(session: Arc>) -> PublishResponse { let mut session = trace_write_lock_unwrap!(session); - let response = session.subscriptions_mut().publish_response_queue().pop_back().unwrap().response; + let response = session + .subscriptions_mut() + .publish_response_queue() + .pop_back() + .unwrap() + .response; let response: PublishResponse = supported_message_as!(response, PublishResponse); response } -fn publish_tick_no_response(session: Arc>, ss: &SubscriptionService, address_space: Arc>, now: DateTimeUtc, duration: chrono::Duration) -> DateTimeUtc { +fn publish_tick_no_response( + session: Arc>, + ss: &SubscriptionService, + address_space: Arc>, + now: DateTimeUtc, + duration: chrono::Duration, +) -> DateTimeUtc { publish_request(&now, session.clone(), address_space.clone(), ss); let now = now.add(duration); let mut session = trace_write_lock_unwrap!(session); let address_space = trace_read_lock_unwrap!(address_space); let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); - assert_eq!(session.subscriptions_mut().publish_response_queue().len(), 0); + assert_eq!( + session.subscriptions_mut().publish_response_queue().len(), + 0 + ); now } /// Does a publish, ticks by a duration and then calls the function to handle the response. The /// new timestamp is returned so it can be called again. 
-fn publish_tick_response(session: Arc>, ss: &SubscriptionService, address_space: Arc>, now: DateTimeUtc, duration: chrono::Duration, handler: T) -> DateTimeUtc - where T: FnOnce(PublishResponse) +fn publish_tick_response( + session: Arc>, + ss: &SubscriptionService, + address_space: Arc>, + now: DateTimeUtc, + duration: chrono::Duration, + handler: T, +) -> DateTimeUtc +where + T: FnOnce(PublishResponse), { publish_request(&now, session.clone(), address_space.clone(), ss); let now = now.add(duration); @@ -159,7 +255,10 @@ fn publish_tick_response(session: Arc>, ss: &SubscriptionServ let mut session = trace_write_lock_unwrap!(session); let address_space = trace_read_lock_unwrap!(address_space); let _ = session.tick_subscriptions(&now, &address_space, TickReason::TickTimerFired); - assert_eq!(session.subscriptions_mut().publish_response_queue().len(), 1); + assert_eq!( + session.subscriptions_mut().publish_response_queue().len(), + 1 + ); } let response = publish_response(session.clone()); handler(response); @@ -168,7 +267,13 @@ fn publish_tick_response(session: Arc>, ss: &SubscriptionServ fn populate_monitored_item(discard_oldest: bool) -> MonitoredItem { let client_handle = 999; - let mut monitored_item = MonitoredItem::new(&chrono::Utc::now(), 1, TimestampsToReturn::Both, &make_create_request_data_change_filter(-1f64, 5)).unwrap(); + let mut monitored_item = MonitoredItem::new( + &chrono::Utc::now(), + 1, + TimestampsToReturn::Both, + &make_create_request_data_change_filter(-1f64, 5), + ) + .unwrap(); monitored_item.set_discard_oldest(discard_oldest); for i in 0..5 { monitored_item.enqueue_notification_message(MonitoredItemNotification { @@ -253,7 +358,6 @@ fn data_change_filter_test() { assert_eq!(filter.compare(&v1, &v2, None), false); } - #[test] fn data_change_deadband_abs_test() { let filter = DataChangeFilter { @@ -304,18 +408,39 @@ fn deadband_abs() { assert_eq!(DataChangeFilter::abs_compare(100f64, 100f64, 1f64), true); assert_eq!(DataChangeFilter::abs_compare(100f64, 101f64, 1f64), true); assert_eq!(DataChangeFilter::abs_compare(101f64, 100f64, 1f64), true); - assert_eq!(DataChangeFilter::abs_compare(101.001f64, 100f64, 1f64), false); - assert_eq!(DataChangeFilter::abs_compare(100f64, 101.001f64, 1f64), false); + assert_eq!( + DataChangeFilter::abs_compare(101.001f64, 100f64, 1f64), + false + ); + assert_eq!( + DataChangeFilter::abs_compare(100f64, 101.001f64, 1f64), + false + ); } // Straight tests of pct function #[test] fn deadband_pct() { - assert_eq!(DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 0f64), false); - assert_eq!(DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 1f64), true); - assert_eq!(DataChangeFilter::pct_compare(100f64, 101.0001f64, 0f64, 100f64, 1f64), false); - assert_eq!(DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1f64), false); - assert_eq!(DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1.0002f64), true); + assert_eq!( + DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 0f64), + false + ); + assert_eq!( + DataChangeFilter::pct_compare(100f64, 101f64, 0f64, 100f64, 1f64), + true + ); + assert_eq!( + DataChangeFilter::pct_compare(100f64, 101.0001f64, 0f64, 100f64, 1f64), + false + ); + assert_eq!( + DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1f64), + false + ); + assert_eq!( + DataChangeFilter::pct_compare(101.0001f64, 100f64, 0f64, 100f64, 1.0002f64), + true + ); } #[test] @@ -325,36 +450,61 @@ fn monitored_item_data_change_filter() { // Create request should 
monitor attribute of variable, e.g. value // Sample interval is negative so it will always test on repeated calls - let mut monitored_item = MonitoredItem::new(&chrono::Utc::now(), 1, TimestampsToReturn::Both, &make_create_request_data_change_filter(-1f64, 5)).unwrap(); + let mut monitored_item = MonitoredItem::new( + &chrono::Utc::now(), + 1, + TimestampsToReturn::Both, + &make_create_request_data_change_filter(-1f64, 5), + ) + .unwrap(); let now = Utc::now(); assert_eq!(monitored_item.notification_queue().len(), 0); // Expect first call to always succeed - assert_eq!(monitored_item.tick(&now, &address_space, true, false), TickResult::ReportValueChanged); + assert_eq!( + monitored_item.tick(&now, &address_space, true, false), + TickResult::ReportValueChanged + ); // Expect one item in its queue assert_eq!(monitored_item.notification_queue().len(), 1); // Expect false on next tick, with the same value because no subscription timer has fired - assert_eq!(monitored_item.tick(&now, &address_space, false, false), TickResult::NoChange); + assert_eq!( + monitored_item.tick(&now, &address_space, false, false), + TickResult::NoChange + ); assert_eq!(monitored_item.notification_queue().len(), 1); // Expect false because publish timer elapses but value has not changed changed - assert_eq!(monitored_item.tick(&now, &address_space, false, false), TickResult::NoChange); + assert_eq!( + monitored_item.tick(&now, &address_space, false, false), + TickResult::NoChange + ); assert_eq!(monitored_item.notification_queue().len(), 1); // adjust variable value - if let &mut NodeType::Variable(ref mut node) = address_space.find_node_mut(&test_var_node_id()).unwrap() { - let _ = node.set_value(NumericRange::None, Variant::UInt32(1)).unwrap(); + if let &mut NodeType::Variable(ref mut node) = + address_space.find_node_mut(&test_var_node_id()).unwrap() + { + let _ = node + .set_value(NumericRange::None, Variant::UInt32(1)) + .unwrap(); } else { panic!("Expected a variable, didn't get one!!"); } // Expect change but only when subscription timer elapsed - assert_eq!(monitored_item.tick(&now, &address_space, false, false), TickResult::NoChange); - assert_eq!(monitored_item.tick(&now, &address_space, true, false), TickResult::ReportValueChanged); + assert_eq!( + monitored_item.tick(&now, &address_space, false, false), + TickResult::NoChange + ); + assert_eq!( + monitored_item.tick(&now, &address_space, true, false), + TickResult::ReportValueChanged + ); assert_eq!(monitored_item.notification_queue().len(), 2); } @@ -366,30 +516,49 @@ fn monitored_item_event_filter() { // Create request should monitor attribute of variable, e.g. 
value // Sample interval is negative so it will always test on repeated calls - let mut monitored_item = MonitoredItem::new(&chrono::Utc::now(), 1, TimestampsToReturn::Both, &make_create_request_event_filter(-1f64, 5)).unwrap(); + let mut monitored_item = MonitoredItem::new( + &chrono::Utc::now(), + 1, + TimestampsToReturn::Both, + &make_create_request_event_filter(-1f64, 5), + ) + .unwrap(); let mut now = Utc::now(); // Verify tick does nothing - assert_eq!(monitored_item.tick(&now, &address_space, false, false), TickResult::NoChange); + assert_eq!( + monitored_item.tick(&now, &address_space, false, false), + TickResult::NoChange + ); now = now + chrono::Duration::milliseconds(100); // Raise an event let event_id = NodeId::new(ns, "Event1"); let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new(&event_id, event_type_id, "Event1", "", NodeId::objects_folder_id(), DateTime::from(now)) - .source_node(test_object_node_id()); + let mut event = BaseEventType::new( + &event_id, + event_type_id, + "Event1", + "", + NodeId::objects_folder_id(), + DateTime::from(now), + ) + .source_node(test_object_node_id()); assert!(event.raise(&mut address_space).is_ok()); // Verify that event comes back - assert_eq!(monitored_item.tick(&now, &address_space, true, false), TickResult::ReportValueChanged); + assert_eq!( + monitored_item.tick(&now, &address_space, true, false), + TickResult::ReportValueChanged + ); // Look at monitored item queue assert_eq!(monitored_item.notification_queue().len(), 1); let event = match monitored_item.oldest_notification_message().unwrap() { Notification::Event(event) => event, - _ => panic!() + _ => panic!(), }; // Verify EventFieldList @@ -401,186 +570,398 @@ fn monitored_item_event_filter() { let event_id = event_fields.remove(0); match event_id { Variant::ByteString(value) => assert_eq!(value.value.unwrap().len(), 16), - _ => panic!() + _ => panic!(), } // Source node should point to the originating object let event_source_node = event_fields.remove(0); match event_source_node { Variant::NodeId(source_node) => assert_eq!(*source_node, test_object_node_id()), - _ => panic!() + _ => panic!(), } // Tick again (nothing expected) now = now + chrono::Duration::milliseconds(100); - assert_eq!(monitored_item.tick(&now, &address_space, false, false), TickResult::NoChange); + assert_eq!( + monitored_item.tick(&now, &address_space, false, false), + TickResult::NoChange + ); // Raise an event on another object, expect nothing in the tick about it let event_id = NodeId::new(ns, "Event2"); let event_type_id = ObjectTypeId::BaseEventType; - let mut event = BaseEventType::new(&event_id, event_type_id, "Event2", "", NodeId::objects_folder_id(), DateTime::from(now)) - .source_node(ObjectId::Server); + let mut event = BaseEventType::new( + &event_id, + event_type_id, + "Event2", + "", + NodeId::objects_folder_id(), + DateTime::from(now), + ) + .source_node(ObjectId::Server); assert!(event.raise(&mut address_space).is_ok()); now = now + chrono::Duration::milliseconds(100); - assert_eq!(monitored_item.tick(&now, &address_space, false, false), TickResult::NoChange); + assert_eq!( + monitored_item.tick(&now, &address_space, false, false), + TickResult::NoChange + ); } /// Test to ensure create monitored items returns an error for an unknown node id #[test] fn unknown_node_id() { - do_subscription_service_test(|server_state, session, address_space, ss: SubscriptionService, mis: MonitoredItemService| { - // Create subscription - let subscription_id = { - let request = 
create_subscription_request(0, 0); - let response: CreateSubscriptionResponse = supported_message_as!(ss.create_subscription(server_state.clone(), session.clone(), &request), CreateSubscriptionResponse); - response.subscription_id - }; - - let request = create_monitored_items_request(subscription_id, vec![ - NodeId::new(1, var_name(1)), - NodeId::new(99, "Doesn't exist") - ]); - - let response: CreateMonitoredItemsResponse = supported_message_as!(mis.create_monitored_items(server_state.clone(), session.clone(), address_space.clone(), &request), CreateMonitoredItemsResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 2); - assert_eq!(results.get(0).as_ref().unwrap().status_code, StatusCode::Good); - assert_eq!(results.get(1).as_ref().unwrap().status_code, StatusCode::BadNodeIdUnknown); - }); + do_subscription_service_test( + |server_state, + session, + address_space, + ss: SubscriptionService, + mis: MonitoredItemService| { + // Create subscription + let subscription_id = { + let request = create_subscription_request(0, 0); + let response: CreateSubscriptionResponse = supported_message_as!( + ss.create_subscription(server_state.clone(), session.clone(), &request), + CreateSubscriptionResponse + ); + response.subscription_id + }; + + let request = create_monitored_items_request( + subscription_id, + vec![ + NodeId::new(1, var_name(1)), + NodeId::new(99, "Doesn't exist"), + ], + ); + + let response: CreateMonitoredItemsResponse = supported_message_as!( + mis.create_monitored_items( + server_state.clone(), + session.clone(), + address_space.clone(), + &request + ), + CreateMonitoredItemsResponse + ); + let results = response.results.unwrap(); + assert_eq!(results.len(), 2); + assert_eq!( + results.get(0).as_ref().unwrap().status_code, + StatusCode::Good + ); + assert_eq!( + results.get(1).as_ref().unwrap().status_code, + StatusCode::BadNodeIdUnknown + ); + }, + ); } #[test] fn monitored_item_triggers() { - do_subscription_service_test(|server_state, session, address_space, ss: SubscriptionService, mis: MonitoredItemService| { - // Create subscription - let subscription_id = { - let request = create_subscription_request(0, 0); - let response: CreateSubscriptionResponse = supported_message_as!(ss.create_subscription(server_state.clone(), session.clone(), &request), CreateSubscriptionResponse); - response.subscription_id - }; - - { - let mut session = trace_write_lock_unwrap!(session); - session.subscriptions_mut().get_mut(subscription_id).unwrap().set_state(SubscriptionState::Normal); - } - - let max_monitored_items: usize = 4; - - let triggering_node = NodeId::new(1, var_name(0)); - // create 4 monitored items - let request = create_monitored_items_request(subscription_id, vec![ - triggering_node.clone(), - NodeId::new(1, var_name(1)), - NodeId::new(1, var_name(2)), - NodeId::new(1, var_name(3)), - ]); - let response: CreateMonitoredItemsResponse = supported_message_as!(mis.create_monitored_items(server_state.clone(), session.clone(), address_space.clone(), &request), CreateMonitoredItemsResponse); - - // The first monitored item will be the triggering item, the other 3 will be triggered items - let monitored_item_ids: Vec = response.results.unwrap().iter().map(|mir| { - assert_eq!(mir.status_code, StatusCode::Good); - mir.monitored_item_id - }).collect(); - assert_eq!(monitored_item_ids.len(), max_monitored_items); - - let triggering_item_id = monitored_item_ids[0]; - let triggered_item_ids = &monitored_item_ids[1..]; - - // set 3 monitored items to be reporting, 
sampling, disabled respectively - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[0], MonitoringMode::Reporting, &mis); - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[1], MonitoringMode::Sampling, &mis); - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[2], MonitoringMode::Disabled, &mis); - - // set 1 monitored item to trigger other 3 plus itself - let (add_results, remove_results) = set_triggering(session.clone(), subscription_id, monitored_item_ids[0], &[monitored_item_ids[0], monitored_item_ids[1], monitored_item_ids[2], monitored_item_ids[3]], &[], &mis); - - // expect all adds to succeed except the one to itself - assert!(remove_results.is_none()); - let add_results = add_results.unwrap(); - assert_eq!(add_results[0], StatusCode::BadMonitoredItemIdInvalid); - assert_eq!(add_results[1], StatusCode::Good); - assert_eq!(add_results[2], StatusCode::Good); - assert_eq!(add_results[3], StatusCode::Good); - - let now = Utc::now(); - - // publish on the monitored item - let now = publish_tick_response(session.clone(), &ss, address_space.clone(), now, chrono::Duration::seconds(2), |response| { - let (notifications, events) = response.notification_message.notifications(&DecodingLimits::default()).unwrap(); - assert_eq!(notifications.len(), 1); - assert!(events.is_empty()); - let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); - assert_eq!(monitored_items.len(), 3); - let client_handles: HashSet = monitored_items.iter().map(|min| min.client_handle).collect(); - // expect a notification to be for triggering item - assert!(client_handles.contains(&0)); - // expect a notification to be for triggered[0] (reporting) because it's reporting - assert!(client_handles.contains(&1)); - // expect a notification to be for triggered[1] (sampling) - assert!(client_handles.contains(&2)); - }); - - // do a publish on the monitored item, expect no notification because nothing has changed - let now = publish_tick_no_response(session.clone(), &ss, address_space.clone(), now, chrono::Duration::seconds(2)); - - // set monitoring mode of all 3 to reporting. - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[0], MonitoringMode::Reporting, &mis); - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[1], MonitoringMode::Reporting, &mis); - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[2], MonitoringMode::Reporting, &mis); - - // Change the triggering item's value - { - let mut address_space = trace_write_lock_unwrap!(address_space); - let _ = address_space.set_variable_value(triggering_node.clone(), 1, &DateTime::from(now.clone()), &DateTime::from(now.clone())); - } - - // In this case, the triggering item changes, but triggered items are all reporting so are ignored unless they themselves - // need to report. 
Only 3 will fire because it was disabled previously - let now = publish_tick_response(session.clone(), &ss, address_space.clone(), now, chrono::Duration::seconds(2), |response| { - let (notifications, events) = response.notification_message.notifications(&DecodingLimits::default()).unwrap(); - assert_eq!(notifications.len(), 1); - assert!(events.is_empty()); - let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); - let client_handles: HashSet = monitored_items.iter().map(|min| min.client_handle).collect(); - assert_eq!(monitored_items.len(), 2); - assert!(client_handles.contains(&0)); - assert!(client_handles.contains(&3)); - }); - - // revert to 3 items to be reporting, sampling, disabled - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[0], MonitoringMode::Reporting, &mis); - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[1], MonitoringMode::Sampling, &mis); - set_monitoring_mode(session.clone(), subscription_id, triggered_item_ids[2], MonitoringMode::Disabled, &mis); - - // change monitoring mode of triggering item to sampling and change value - set_monitoring_mode(session.clone(), subscription_id, triggering_item_id, MonitoringMode::Sampling, &mis); - { - let mut address_space = trace_write_lock_unwrap!(address_space); - let _ = address_space.set_variable_value(triggering_node.clone(), 2, &DateTime::from(now.clone()), &DateTime::from(now.clone())); - } - - // do a publish on the monitored item, - let now = publish_tick_response(session.clone(), &ss, address_space.clone(), now, chrono::Duration::seconds(2), |response| { - // expect only 1 data change corresponding to sampling triggered item - let (notifications, events) = response.notification_message.notifications(&DecodingLimits::default()).unwrap(); - assert_eq!(notifications.len(), 1); - assert!(events.is_empty()); - let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); - let client_handles: HashSet = monitored_items.iter().map(|min| min.client_handle).collect(); - assert_eq!(monitored_items.len(), 1); - assert!(client_handles.contains(&2)); - }); - - // change monitoring mode of triggering item to disable - set_monitoring_mode(session.clone(), subscription_id, triggering_item_id, MonitoringMode::Disabled, &mis); - { - let mut address_space = trace_write_lock_unwrap!(address_space); - let _ = address_space.set_variable_value(triggering_node.clone(), 3, &DateTime::from(now.clone()), &DateTime::from(now.clone())); - } - - // do a publish on the monitored item, expect 0 data changes - let _ = publish_tick_no_response(session.clone(), &ss, address_space.clone(), now, chrono::Duration::seconds(2)); - }); + do_subscription_service_test( + |server_state, + session, + address_space, + ss: SubscriptionService, + mis: MonitoredItemService| { + // Create subscription + let subscription_id = { + let request = create_subscription_request(0, 0); + let response: CreateSubscriptionResponse = supported_message_as!( + ss.create_subscription(server_state.clone(), session.clone(), &request), + CreateSubscriptionResponse + ); + response.subscription_id + }; + + { + let mut session = trace_write_lock_unwrap!(session); + session + .subscriptions_mut() + .get_mut(subscription_id) + .unwrap() + .set_state(SubscriptionState::Normal); + } + + let max_monitored_items: usize = 4; + + let triggering_node = NodeId::new(1, var_name(0)); + // create 4 monitored items + let request = create_monitored_items_request( + subscription_id, + vec![ + triggering_node.clone(), + 
NodeId::new(1, var_name(1)), + NodeId::new(1, var_name(2)), + NodeId::new(1, var_name(3)), + ], + ); + let response: CreateMonitoredItemsResponse = supported_message_as!( + mis.create_monitored_items( + server_state.clone(), + session.clone(), + address_space.clone(), + &request + ), + CreateMonitoredItemsResponse + ); + + // The first monitored item will be the triggering item, the other 3 will be triggered items + let monitored_item_ids: Vec = response + .results + .unwrap() + .iter() + .map(|mir| { + assert_eq!(mir.status_code, StatusCode::Good); + mir.monitored_item_id + }) + .collect(); + assert_eq!(monitored_item_ids.len(), max_monitored_items); + + let triggering_item_id = monitored_item_ids[0]; + let triggered_item_ids = &monitored_item_ids[1..]; + + // set 3 monitored items to be reporting, sampling, disabled respectively + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[0], + MonitoringMode::Reporting, + &mis, + ); + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[1], + MonitoringMode::Sampling, + &mis, + ); + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[2], + MonitoringMode::Disabled, + &mis, + ); + + // set 1 monitored item to trigger other 3 plus itself + let (add_results, remove_results) = set_triggering( + session.clone(), + subscription_id, + monitored_item_ids[0], + &[ + monitored_item_ids[0], + monitored_item_ids[1], + monitored_item_ids[2], + monitored_item_ids[3], + ], + &[], + &mis, + ); + + // expect all adds to succeed except the one to itself + assert!(remove_results.is_none()); + let add_results = add_results.unwrap(); + assert_eq!(add_results[0], StatusCode::BadMonitoredItemIdInvalid); + assert_eq!(add_results[1], StatusCode::Good); + assert_eq!(add_results[2], StatusCode::Good); + assert_eq!(add_results[3], StatusCode::Good); + + let now = Utc::now(); + + // publish on the monitored item + let now = publish_tick_response( + session.clone(), + &ss, + address_space.clone(), + now, + chrono::Duration::seconds(2), + |response| { + let (notifications, events) = response + .notification_message + .notifications(&DecodingLimits::default()) + .unwrap(); + assert_eq!(notifications.len(), 1); + assert!(events.is_empty()); + let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); + assert_eq!(monitored_items.len(), 3); + let client_handles: HashSet = monitored_items + .iter() + .map(|min| min.client_handle) + .collect(); + // expect a notification to be for triggering item + assert!(client_handles.contains(&0)); + // expect a notification to be for triggered[0] (reporting) because it's reporting + assert!(client_handles.contains(&1)); + // expect a notification to be for triggered[1] (sampling) + assert!(client_handles.contains(&2)); + }, + ); + + // do a publish on the monitored item, expect no notification because nothing has changed + let now = publish_tick_no_response( + session.clone(), + &ss, + address_space.clone(), + now, + chrono::Duration::seconds(2), + ); + + // set monitoring mode of all 3 to reporting. 
+ set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[0], + MonitoringMode::Reporting, + &mis, + ); + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[1], + MonitoringMode::Reporting, + &mis, + ); + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[2], + MonitoringMode::Reporting, + &mis, + ); + + // Change the triggering item's value + { + let mut address_space = trace_write_lock_unwrap!(address_space); + let _ = address_space.set_variable_value( + triggering_node.clone(), + 1, + &DateTime::from(now.clone()), + &DateTime::from(now.clone()), + ); + } + + // In this case, the triggering item changes, but triggered items are all reporting so are ignored unless they themselves + // need to report. Only 3 will fire because it was disabled previously + let now = publish_tick_response( + session.clone(), + &ss, + address_space.clone(), + now, + chrono::Duration::seconds(2), + |response| { + let (notifications, events) = response + .notification_message + .notifications(&DecodingLimits::default()) + .unwrap(); + assert_eq!(notifications.len(), 1); + assert!(events.is_empty()); + let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); + let client_handles: HashSet = monitored_items + .iter() + .map(|min| min.client_handle) + .collect(); + assert_eq!(monitored_items.len(), 2); + assert!(client_handles.contains(&0)); + assert!(client_handles.contains(&3)); + }, + ); + + // revert to 3 items to be reporting, sampling, disabled + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[0], + MonitoringMode::Reporting, + &mis, + ); + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[1], + MonitoringMode::Sampling, + &mis, + ); + set_monitoring_mode( + session.clone(), + subscription_id, + triggered_item_ids[2], + MonitoringMode::Disabled, + &mis, + ); + + // change monitoring mode of triggering item to sampling and change value + set_monitoring_mode( + session.clone(), + subscription_id, + triggering_item_id, + MonitoringMode::Sampling, + &mis, + ); + { + let mut address_space = trace_write_lock_unwrap!(address_space); + let _ = address_space.set_variable_value( + triggering_node.clone(), + 2, + &DateTime::from(now.clone()), + &DateTime::from(now.clone()), + ); + } + + // do a publish on the monitored item, + let now = publish_tick_response( + session.clone(), + &ss, + address_space.clone(), + now, + chrono::Duration::seconds(2), + |response| { + // expect only 1 data change corresponding to sampling triggered item + let (notifications, events) = response + .notification_message + .notifications(&DecodingLimits::default()) + .unwrap(); + assert_eq!(notifications.len(), 1); + assert!(events.is_empty()); + let monitored_items = notifications[0].monitored_items.as_ref().unwrap(); + let client_handles: HashSet = monitored_items + .iter() + .map(|min| min.client_handle) + .collect(); + assert_eq!(monitored_items.len(), 1); + assert!(client_handles.contains(&2)); + }, + ); + + // change monitoring mode of triggering item to disable + set_monitoring_mode( + session.clone(), + subscription_id, + triggering_item_id, + MonitoringMode::Disabled, + &mis, + ); + { + let mut address_space = trace_write_lock_unwrap!(address_space); + let _ = address_space.set_variable_value( + triggering_node.clone(), + 3, + &DateTime::from(now.clone()), + &DateTime::from(now.clone()), + ); + } + + // do a publish on the monitored item, expect 0 data changes + let _ = 
publish_tick_no_response( + session.clone(), + &ss, + address_space.clone(), + now, + chrono::Duration::seconds(2), + ); + }, + ); } #[test] diff --git a/server/src/tests/services/node_management.rs b/server/src/tests/services/node_management.rs index 4d1d8e9a6..2ddc970cc 100644 --- a/server/src/tests/services/node_management.rs +++ b/server/src/tests/services/node_management.rs @@ -1,14 +1,18 @@ -use opcua_types::node_ids::{ObjectId, ObjectTypeId, ReferenceTypeId, DataTypeId, MethodId}; +use opcua_types::node_ids::{DataTypeId, MethodId, ObjectId, ObjectTypeId, ReferenceTypeId}; -use crate::{ - services::node_management::NodeManagementService, -}; +use crate::services::node_management::NodeManagementService; use super::*; /// A helper that sets up a subscription service test fn do_node_management_service_test(can_modify_address_space: bool, f: T) - where T: FnOnce(Arc>, Arc>, Arc>, NodeManagementService) +where + T: FnOnce( + Arc>, + Arc>, + Arc>, + NodeManagementService, + ), { opcua_console_logging::init(); @@ -22,355 +26,529 @@ fn do_node_management_service_test(can_modify_address_space: bool, f: T) let _ = add_many_vars_to_address_space(st.address_space.clone(), 10); - f(st.server_state.clone(), st.session.clone(), st.address_space.clone(), NodeManagementService::new()); + f( + st.server_state.clone(), + st.session.clone(), + st.address_space.clone(), + NodeManagementService::new(), + ); } // A helper that adds one node and tests that the result matches the expected status code -fn do_add_node_test_with_expected_error(can_modify_address_space: bool, item: AddNodesItem, expected_status_code: StatusCode) { - do_node_management_service_test(can_modify_address_space, |server_state, session, address_space, nms| { - let response = nms.add_nodes(server_state, session, address_space.clone(), &AddNodesRequest { - request_header: RequestHeader::dummy(), - nodes_to_add: Some(vec![item]), - }); - let response: AddNodesResponse = supported_message_as!(response, AddNodesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!(format!("{}", results[0].status_code), format!("{}", expected_status_code)); - if expected_status_code.is_good() { - assert_ne!(results[0].added_node_id, NodeId::null()); - let address_space = trace_read_lock_unwrap!(address_space); - assert!(address_space.find_node(&results[0].added_node_id).is_some()); - } else { - assert_eq!(results[0].added_node_id, NodeId::null()); - } - }); -} - -fn do_add_references_test(can_modify_address_space: bool, item: AddReferencesItem, expected_status_code: StatusCode) { - do_node_management_service_test(can_modify_address_space, |server_state, session, address_space, nms| { - let response = nms.add_references(server_state, session, address_space, &AddReferencesRequest { - request_header: RequestHeader::dummy(), - references_to_add: Some(vec![item]), - }); - let response: AddReferencesResponse = supported_message_as!(response, AddReferencesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!(format!("{}", results[0]), format!("{}", expected_status_code)); - if expected_status_code.is_good() { - // TODO expect the reference to exist - } - }); -} - -fn do_delete_nodes_test(can_modify_address_space: bool, item: DeleteNodesItem, expected_status_code: StatusCode) { - do_node_management_service_test(can_modify_address_space, |server_state, session, address_space, nms| { - let response = nms.delete_nodes(server_state, session, address_space, &DeleteNodesRequest { - 
request_header: RequestHeader::dummy(), - nodes_to_delete: Some(vec![item]), - }); - let response: DeleteNodesResponse = supported_message_as!(response, DeleteNodesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!(format!("{}", results[0]), format!("{}", expected_status_code)); - }); -} - -fn do_delete_references_test(can_modify_address_space: bool, item: DeleteReferencesItem, expected_status_code: StatusCode) { - do_node_management_service_test(can_modify_address_space, |server_state, session, address_space, nms| { - let response = nms.delete_references(server_state, session, address_space, &DeleteReferencesRequest { - request_header: RequestHeader::dummy(), - references_to_delete: Some(vec![item]), - }); - let response: DeleteReferencesResponse = supported_message_as!(response, DeleteReferencesResponse); - let results = response.results.unwrap(); - assert_eq!(results.len(), 1); - assert_eq!(format!("{}", results[0]), format!("{}", expected_status_code)); - }); -} - -fn object_attributes(display_name: T) -> ExtensionObject where T: Into { - let specified_attributes = AttributesMask::DISPLAY_NAME | - AttributesMask::DESCRIPTION | - AttributesMask::WRITE_MASK | - AttributesMask::USER_WRITE_MASK | - AttributesMask::EVENT_NOTIFIER; - - ExtensionObject::from_encodable(ObjectId::ObjectAttributes_Encoding_DefaultBinary, &ObjectAttributes { - specified_attributes: specified_attributes.bits(), - display_name: display_name.into(), - description: LocalizedText::new("", "description"), - write_mask: 0, - user_write_mask: 0, - event_notifier: 0, - }) -} - -fn variable_attributes(display_name: T) -> ExtensionObject where T: Into { - let specified_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::ACCESS_LEVEL | AttributesMask::USER_ACCESS_LEVEL | - AttributesMask::DATA_TYPE | AttributesMask::HISTORIZING | AttributesMask::VALUE | AttributesMask::VALUE_RANK; - - ExtensionObject::from_encodable(ObjectId::VariableAttributes_Encoding_DefaultBinary, &VariableAttributes { - specified_attributes: specified_attributes.bits(), - display_name: display_name.into(), - description: LocalizedText::null(), - write_mask: 0, - user_write_mask: 0, - value: Variant::from(true), - data_type: DataTypeId::Boolean.into(), - value_rank: 1, - array_dimensions: None, - access_level: 1, - user_access_level: 2, - minimum_sampling_interval: 0.0, - historizing: false, - }) -} - -fn method_attributes(display_name: T) -> ExtensionObject where T: Into { - let specified_attributes = AttributesMask::DISPLAY_NAME | AttributesMask::EXECUTABLE | AttributesMask::USER_EXECUTABLE; - ExtensionObject::from_encodable(ObjectId::MethodAttributes_Encoding_DefaultBinary, &MethodAttributes { - specified_attributes: specified_attributes.bits(), - display_name: display_name.into(), - description: LocalizedText::null(), - write_mask: 0, - user_write_mask: 0, - executable: true, - user_executable: true, - }) +fn do_add_node_test_with_expected_error( + can_modify_address_space: bool, + item: AddNodesItem, + expected_status_code: StatusCode, +) { + do_node_management_service_test( + can_modify_address_space, + |server_state, session, address_space, nms| { + let response = nms.add_nodes( + server_state, + session, + address_space.clone(), + &AddNodesRequest { + request_header: RequestHeader::dummy(), + nodes_to_add: Some(vec![item]), + }, + ); + let response: AddNodesResponse = supported_message_as!(response, AddNodesResponse); + let results = response.results.unwrap(); + assert_eq!(results.len(), 1); 
+ assert_eq!( + format!("{}", results[0].status_code), + format!("{}", expected_status_code) + ); + if expected_status_code.is_good() { + assert_ne!(results[0].added_node_id, NodeId::null()); + let address_space = trace_read_lock_unwrap!(address_space); + assert!(address_space.find_node(&results[0].added_node_id).is_some()); + } else { + assert_eq!(results[0].added_node_id, NodeId::null()); + } + }, + ); +} + +fn do_add_references_test( + can_modify_address_space: bool, + item: AddReferencesItem, + expected_status_code: StatusCode, +) { + do_node_management_service_test( + can_modify_address_space, + |server_state, session, address_space, nms| { + let response = nms.add_references( + server_state, + session, + address_space, + &AddReferencesRequest { + request_header: RequestHeader::dummy(), + references_to_add: Some(vec![item]), + }, + ); + let response: AddReferencesResponse = + supported_message_as!(response, AddReferencesResponse); + let results = response.results.unwrap(); + assert_eq!(results.len(), 1); + assert_eq!( + format!("{}", results[0]), + format!("{}", expected_status_code) + ); + if expected_status_code.is_good() { + // TODO expect the reference to exist + } + }, + ); +} + +fn do_delete_nodes_test( + can_modify_address_space: bool, + item: DeleteNodesItem, + expected_status_code: StatusCode, +) { + do_node_management_service_test( + can_modify_address_space, + |server_state, session, address_space, nms| { + let response = nms.delete_nodes( + server_state, + session, + address_space, + &DeleteNodesRequest { + request_header: RequestHeader::dummy(), + nodes_to_delete: Some(vec![item]), + }, + ); + let response: DeleteNodesResponse = + supported_message_as!(response, DeleteNodesResponse); + let results = response.results.unwrap(); + assert_eq!(results.len(), 1); + assert_eq!( + format!("{}", results[0]), + format!("{}", expected_status_code) + ); + }, + ); +} + +fn do_delete_references_test( + can_modify_address_space: bool, + item: DeleteReferencesItem, + expected_status_code: StatusCode, +) { + do_node_management_service_test( + can_modify_address_space, + |server_state, session, address_space, nms| { + let response = nms.delete_references( + server_state, + session, + address_space, + &DeleteReferencesRequest { + request_header: RequestHeader::dummy(), + references_to_delete: Some(vec![item]), + }, + ); + let response: DeleteReferencesResponse = + supported_message_as!(response, DeleteReferencesResponse); + let results = response.results.unwrap(); + assert_eq!(results.len(), 1); + assert_eq!( + format!("{}", results[0]), + format!("{}", expected_status_code) + ); + }, + ); +} + +fn object_attributes(display_name: T) -> ExtensionObject +where + T: Into, +{ + let specified_attributes = AttributesMask::DISPLAY_NAME + | AttributesMask::DESCRIPTION + | AttributesMask::WRITE_MASK + | AttributesMask::USER_WRITE_MASK + | AttributesMask::EVENT_NOTIFIER; + + ExtensionObject::from_encodable( + ObjectId::ObjectAttributes_Encoding_DefaultBinary, + &ObjectAttributes { + specified_attributes: specified_attributes.bits(), + display_name: display_name.into(), + description: LocalizedText::new("", "description"), + write_mask: 0, + user_write_mask: 0, + event_notifier: 0, + }, + ) +} + +fn variable_attributes(display_name: T) -> ExtensionObject +where + T: Into, +{ + let specified_attributes = AttributesMask::DISPLAY_NAME + | AttributesMask::ACCESS_LEVEL + | AttributesMask::USER_ACCESS_LEVEL + | AttributesMask::DATA_TYPE + | AttributesMask::HISTORIZING + | AttributesMask::VALUE + | 
AttributesMask::VALUE_RANK; + + ExtensionObject::from_encodable( + ObjectId::VariableAttributes_Encoding_DefaultBinary, + &VariableAttributes { + specified_attributes: specified_attributes.bits(), + display_name: display_name.into(), + description: LocalizedText::null(), + write_mask: 0, + user_write_mask: 0, + value: Variant::from(true), + data_type: DataTypeId::Boolean.into(), + value_rank: 1, + array_dimensions: None, + access_level: 1, + user_access_level: 2, + minimum_sampling_interval: 0.0, + historizing: false, + }, + ) +} + +fn method_attributes(display_name: T) -> ExtensionObject +where + T: Into, +{ + let specified_attributes = + AttributesMask::DISPLAY_NAME | AttributesMask::EXECUTABLE | AttributesMask::USER_EXECUTABLE; + ExtensionObject::from_encodable( + ObjectId::MethodAttributes_Encoding_DefaultBinary, + &MethodAttributes { + specified_attributes: specified_attributes.bits(), + display_name: display_name.into(), + description: LocalizedText::null(), + write_mask: 0, + user_write_mask: 0, + executable: true, + user_executable: true, + }, + ) } #[test] fn add_nodes_nothing_to_do() { // Empty request - do_node_management_service_test(true, |server_state, session, address_space, nms: NodeManagementService| { - let response = nms.add_nodes(server_state.clone(), session.clone(), address_space.clone(), &AddNodesRequest { - request_header: RequestHeader::dummy(), - nodes_to_add: None, - }); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); - - let response = nms.add_nodes(server_state.clone(), session.clone(), address_space.clone(), &AddNodesRequest { - request_header: RequestHeader::dummy(), - nodes_to_add: Some(vec![]), - }); - let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); - }); + do_node_management_service_test( + true, + |server_state, session, address_space, nms: NodeManagementService| { + let response = nms.add_nodes( + server_state.clone(), + session.clone(), + address_space.clone(), + &AddNodesRequest { + request_header: RequestHeader::dummy(), + nodes_to_add: None, + }, + ); + let response: ServiceFault = supported_message_as!(response, ServiceFault); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); + + let response = nms.add_nodes( + server_state.clone(), + session.clone(), + address_space.clone(), + &AddNodesRequest { + request_header: RequestHeader::dummy(), + nodes_to_add: Some(vec![]), + }, + ); + let response: ServiceFault = supported_message_as!(response, ServiceFault); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); + }, + ); } #[test] fn add_nodes_reference_type_id_invalid() { // Add a node with a null requested node id - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: NodeId::root_folder_id().into(), - reference_type_id: NodeId::null(), // !!! - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, StatusCode::BadReferenceTypeIdInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: NodeId::root_folder_id().into(), + reference_type_id: NodeId::null(), // !!! 
+ requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Object, + node_attributes: object_attributes("foo"), + type_definition: ObjectTypeId::BaseObjectType.into(), + }, + StatusCode::BadReferenceTypeIdInvalid, + ); } #[test] fn add_nodes_node_class_invalid() { // Invalid class - do_add_node_test_with_expected_error(true, - AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Unspecified, // !!! - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, StatusCode::BadNodeClassInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Unspecified, // !!! + node_attributes: object_attributes("foo"), + type_definition: ObjectTypeId::BaseObjectType.into(), + }, + StatusCode::BadNodeClassInvalid, + ); } #[test] fn add_nodes_parent_node_id_invalid() { // Add a node with an invalid parent id - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: NodeId::new(100, "blahblah").into(), // !!! - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, StatusCode::BadParentNodeIdInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: NodeId::new(100, "blahblah").into(), // !!! + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Object, + node_attributes: object_attributes("foo"), + type_definition: ObjectTypeId::BaseObjectType.into(), + }, + StatusCode::BadParentNodeIdInvalid, + ); } #[test] fn add_nodes_type_definition_invalid() { // Add a node with a missing type definition, when one is required // Object - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ExpandedNodeId::null(), // !!! - }, StatusCode::BadTypeDefinitionInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Object, + node_attributes: object_attributes("foo"), + type_definition: ExpandedNodeId::null(), // !!! 
+ }, + StatusCode::BadTypeDefinitionInvalid, + ); // Add a node with a missing type definition, when one is required // Variable - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Variable, - node_attributes: variable_attributes("foo"), - type_definition: ExpandedNodeId::null(), // !!! - }, StatusCode::BadTypeDefinitionInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Variable, + node_attributes: variable_attributes("foo"), + type_definition: ExpandedNodeId::null(), // !!! + }, + StatusCode::BadTypeDefinitionInvalid, + ); // Add a node with a type definition when one is not required, e.g.. for Method - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Method, - node_attributes: method_attributes("foo"), - type_definition: ObjectTypeId::AddressSpaceFileType.into(), // !!! - }, StatusCode::BadTypeDefinitionInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Method, + node_attributes: method_attributes("foo"), + type_definition: ObjectTypeId::AddressSpaceFileType.into(), // !!! + }, + StatusCode::BadTypeDefinitionInvalid, + ); // Add a node with an unrecognized type, something that is not a type at all - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Variable, - node_attributes: variable_attributes("foo"), - type_definition: MethodId::ProgramStateMachineType_Start.into(), // !!! - }, StatusCode::BadTypeDefinitionInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Variable, + node_attributes: variable_attributes("foo"), + type_definition: MethodId::ProgramStateMachineType_Start.into(), // !!! + }, + StatusCode::BadTypeDefinitionInvalid, + ); } #[test] fn add_nodes_node_id_exists() { // Add a node where node id already exists - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ObjectId::ObjectsFolder.into(), // !!! 
- browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Variable, - node_attributes: variable_attributes("foo"), - type_definition: ExpandedNodeId::null(), - }, StatusCode::BadNodeIdExists); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ObjectId::ObjectsFolder.into(), // !!! + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Variable, + node_attributes: variable_attributes("foo"), + type_definition: ExpandedNodeId::null(), + }, + StatusCode::BadNodeIdExists, + ); } #[test] fn add_nodes_mismatching_class_and_attributes_exists() { // Add a node where node class does not match the supplied node attributes - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: variable_attributes("foo"), // !!! - type_definition: ObjectTypeId::AddressSpaceFileType.into(), - }, StatusCode::BadNodeAttributesInvalid); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Object, + node_attributes: variable_attributes("foo"), // !!! + type_definition: ObjectTypeId::AddressSpaceFileType.into(), + }, + StatusCode::BadNodeAttributesInvalid, + ); } #[test] fn add_nodes_browse_name_duplicated() { // Add a node which is valid - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: NodeId::root_folder_id().into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("Objects"), // !!! - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, StatusCode::BadBrowseNameDuplicated); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: NodeId::root_folder_id().into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("Objects"), // !!! 
+ node_class: NodeClass::Object, + node_attributes: object_attributes("foo"), + type_definition: ObjectTypeId::BaseObjectType.into(), + }, + StatusCode::BadBrowseNameDuplicated, + ); } #[test] fn add_nodes_valid() { // Add a node which is valid - do_add_node_test_with_expected_error(true, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, StatusCode::Good); + do_add_node_test_with_expected_error( + true, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Object, + node_attributes: object_attributes("foo"), + type_definition: ObjectTypeId::BaseObjectType.into(), + }, + StatusCode::Good, + ); } #[test] fn add_nodes_user_access_denied() { // Add a node without permission - do_add_node_test_with_expected_error(false, AddNodesItem { - parent_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - requested_new_node_id: ExpandedNodeId::null(), - browse_name: QualifiedName::from("boo"), - node_class: NodeClass::Object, - node_attributes: object_attributes("foo"), - type_definition: ObjectTypeId::BaseObjectType.into(), - }, StatusCode::BadUserAccessDenied); + do_add_node_test_with_expected_error( + false, + AddNodesItem { + parent_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + requested_new_node_id: ExpandedNodeId::null(), + browse_name: QualifiedName::from("boo"), + node_class: NodeClass::Object, + node_attributes: object_attributes("foo"), + type_definition: ObjectTypeId::BaseObjectType.into(), + }, + StatusCode::BadUserAccessDenied, + ); } #[test] fn add_references() { // Add a reference where the node id is invalid - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasProperty.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: var_node_id(1).into(), - target_node_class: NodeClass::Variable, - }, StatusCode::Good); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::HasProperty.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: var_node_id(1).into(), + target_node_class: NodeClass::Variable, + }, + StatusCode::Good, + ); } #[test] fn add_references_source_node_id_invalid() { // Add a reference where the node id is invalid - do_add_references_test(true, AddReferencesItem { - source_node_id: NodeId::null(), // !!! - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ServerConfiguration.into(), - target_node_class: NodeClass::Object, - }, StatusCode::BadSourceNodeIdInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: NodeId::null(), // !!! 
+ reference_type_id: ReferenceTypeId::HasChild.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ObjectId::ServerConfiguration.into(), + target_node_class: NodeClass::Object, + }, + StatusCode::BadSourceNodeIdInvalid, + ); } #[test] fn add_references_target_node_id_invalid() { // Add a reference where the node id is invalid - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ExpandedNodeId::null(), // !!! - target_node_class: NodeClass::Object, - }, StatusCode::BadTargetNodeIdInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::HasChild.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ExpandedNodeId::null(), // !!! + target_node_class: NodeClass::Object, + }, + StatusCode::BadTargetNodeIdInvalid, + ); } #[test] fn add_references_server_uri_invalid() { // Add a reference where the server uri is invalid - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::from("urn:foo"), // !!! - target_node_id: ObjectId::ServerConfiguration.into(), - target_node_class: NodeClass::Object, - }, StatusCode::BadServerUriInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::HasChild.into(), + is_forward: true, + target_server_uri: UAString::from("urn:foo"), // !!! + target_node_id: ObjectId::ServerConfiguration.into(), + target_node_class: NodeClass::Object, + }, + StatusCode::BadServerUriInvalid, + ); } #[test] @@ -378,147 +556,206 @@ fn add_references_reference_type_id_invalid() { // Add a reference where the reference type id is invalid // Null node - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: NodeId::null(), // !!! - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Object, - }, StatusCode::BadReferenceTypeIdInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: NodeId::null(), // !!! + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ObjectId::ObjectsFolder.into(), + target_node_class: NodeClass::Object, + }, + StatusCode::BadReferenceTypeIdInvalid, + ); // Not a reference type id node - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: MethodId::AddressSpaceFileType_Write.into(), // !!! - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Object, - }, StatusCode::BadReferenceTypeIdInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: MethodId::AddressSpaceFileType_Write.into(), // !!! 
+ is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ObjectId::ObjectsFolder.into(), + target_node_class: NodeClass::Object, + }, + StatusCode::BadReferenceTypeIdInvalid, + ); } #[test] fn add_references_reference_local_only() { // Add a reference where the reference is remote - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::HasChild.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ExpandedNodeId { server_index: 1, namespace_uri: UAString::null(), node_id: ObjectId::ServerConfiguration.into() }, // !!! - target_node_class: NodeClass::Object, - }, StatusCode::BadReferenceLocalOnly); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::HasChild.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ExpandedNodeId { + server_index: 1, + namespace_uri: UAString::null(), + node_id: ObjectId::ServerConfiguration.into(), + }, // !!! + target_node_class: NodeClass::Object, + }, + StatusCode::BadReferenceLocalOnly, + ); } #[test] fn add_references_duplicate_reference_not_allowed() { // Add a reference that is a duplicate - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Object, - }, StatusCode::BadDuplicateReferenceNotAllowed); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ObjectId::ObjectsFolder.into(), + target_node_class: NodeClass::Object, + }, + StatusCode::BadDuplicateReferenceNotAllowed, + ); } #[test] fn add_references_node_class_invalid() { // Add a reference where the node class is invalid - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Unspecified, // !!! - }, StatusCode::BadNodeClassInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ObjectId::ObjectsFolder.into(), + target_node_class: NodeClass::Unspecified, // !!! + }, + StatusCode::BadNodeClassInvalid, + ); // This supplies a target class which is different type from the target node's class - do_add_references_test(true, AddReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_server_uri: UAString::null(), - target_node_id: ObjectId::ObjectsFolder.into(), - target_node_class: NodeClass::Variable, // !!! 
- }, StatusCode::BadNodeClassInvalid); + do_add_references_test( + true, + AddReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_server_uri: UAString::null(), + target_node_id: ObjectId::ObjectsFolder.into(), + target_node_class: NodeClass::Variable, // !!! + }, + StatusCode::BadNodeClassInvalid, + ); } #[test] fn delete_nodes() { // delete a node by node id - do_delete_nodes_test(true, DeleteNodesItem { - node_id: var_node_id(1), - delete_target_references: false, - }, StatusCode::Good); - do_delete_nodes_test(true, DeleteNodesItem { - node_id: var_node_id(2), - delete_target_references: true, - }, StatusCode::Good); + do_delete_nodes_test( + true, + DeleteNodesItem { + node_id: var_node_id(1), + delete_target_references: false, + }, + StatusCode::Good, + ); + do_delete_nodes_test( + true, + DeleteNodesItem { + node_id: var_node_id(2), + delete_target_references: true, + }, + StatusCode::Good, + ); } #[test] fn delete_nodes_node_id_unknown() { // delete a node by node id when it does not exist - do_delete_nodes_test(true, DeleteNodesItem { - node_id: var_node_id(2000), // !!! - delete_target_references: false, - }, StatusCode::BadNodeIdUnknown); + do_delete_nodes_test( + true, + DeleteNodesItem { + node_id: var_node_id(2000), // !!! + delete_target_references: false, + }, + StatusCode::BadNodeIdUnknown, + ); } #[test] fn delete_nodes_user_access_denied() { // delete a node by node id without permission - do_delete_nodes_test(false, DeleteNodesItem { - node_id: var_node_id(1), - delete_target_references: false, - }, StatusCode::BadUserAccessDenied); + do_delete_nodes_test( + false, + DeleteNodesItem { + node_id: var_node_id(1), + delete_target_references: false, + }, + StatusCode::BadUserAccessDenied, + ); } #[test] fn delete_references() { - do_delete_references_test(true, DeleteReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ObjectId::ObjectsFolder.into(), - delete_bidirectional: false, - }, StatusCode::Good); + do_delete_references_test( + true, + DeleteReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_node_id: ObjectId::ObjectsFolder.into(), + delete_bidirectional: false, + }, + StatusCode::Good, + ); } #[test] fn delete_references_user_access_denied() { - do_delete_references_test(false, DeleteReferencesItem { - source_node_id: ObjectId::RootFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ObjectId::ObjectsFolder.into(), - delete_bidirectional: false, - }, StatusCode::BadUserAccessDenied); + do_delete_references_test( + false, + DeleteReferencesItem { + source_node_id: ObjectId::RootFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_node_id: ObjectId::ObjectsFolder.into(), + delete_bidirectional: false, + }, + StatusCode::BadUserAccessDenied, + ); } - #[test] fn delete_references_source_node_id_invalid() { - do_delete_references_test(true, DeleteReferencesItem { - source_node_id: NodeId::null(), // !!! 
- reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ObjectId::ObjectsFolder.into(), - delete_bidirectional: false, - }, StatusCode::BadSourceNodeIdInvalid); + do_delete_references_test( + true, + DeleteReferencesItem { + source_node_id: NodeId::null(), // !!! + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_node_id: ObjectId::ObjectsFolder.into(), + delete_bidirectional: false, + }, + StatusCode::BadSourceNodeIdInvalid, + ); } #[test] fn delete_references_target_node_id_invalid() { - do_delete_references_test(true, DeleteReferencesItem { - source_node_id: ObjectId::ObjectsFolder.into(), - reference_type_id: ReferenceTypeId::Organizes.into(), - is_forward: true, - target_node_id: ExpandedNodeId::null(), // !!! - delete_bidirectional: false, - }, StatusCode::BadTargetNodeIdInvalid); + do_delete_references_test( + true, + DeleteReferencesItem { + source_node_id: ObjectId::ObjectsFolder.into(), + reference_type_id: ReferenceTypeId::Organizes.into(), + is_forward: true, + target_node_id: ExpandedNodeId::null(), // !!! + delete_bidirectional: false, + }, + StatusCode::BadTargetNodeIdInvalid, + ); } diff --git a/server/src/tests/services/session.rs b/server/src/tests/services/session.rs index 69bb99db7..d5d3b9800 100644 --- a/server/src/tests/services/session.rs +++ b/server/src/tests/services/session.rs @@ -1,24 +1,28 @@ -use opcua_crypto::{ - random, - SecurityPolicy, - user_identity::make_user_name_identity_token, -}; +use opcua_crypto::{random, user_identity::make_user_name_identity_token, SecurityPolicy}; use opcua_types::{ActivateSessionRequest, RequestHeader, SignatureData}; use crate::{ builder::ServerBuilder, - identity_token::{POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, POLICY_ID_USER_PASS_RSA_OAEP}, - tests::* + identity_token::{ + POLICY_ID_USER_PASS_NONE, POLICY_ID_USER_PASS_RSA_15, POLICY_ID_USER_PASS_RSA_OAEP, + }, + tests::*, }; fn dummy_activate_session_request() -> ActivateSessionRequest { ActivateSessionRequest { request_header: RequestHeader::dummy(), - client_signature: SignatureData { algorithm: UAString::null(), signature: ByteString::null() }, + client_signature: SignatureData { + algorithm: UAString::null(), + signature: ByteString::null(), + }, client_software_certificates: None, locale_ids: None, user_identity_token: ExtensionObject::null(), - user_token_signature: SignatureData { algorithm: UAString::null(), signature: ByteString::null() }, + user_token_signature: SignatureData { + algorithm: UAString::null(), + signature: ByteString::null(), + }, } } @@ -30,28 +34,59 @@ fn anonymous_user_token() { // Makes an anonymous token and sticks it into an extension object let token = AnonymousIdentityToken { - policy_id: UAString::from("anonymous") + policy_id: UAString::from("anonymous"), }; - let token = ExtensionObject::from_encodable(ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, &token); + let token = ExtensionObject::from_encodable( + ObjectId::AnonymousIdentityToken_Encoding_DefaultBinary, + &token, + ); let server_nonce = random::byte_string(20); let request = dummy_activate_session_request(); - let result = server_state.authenticate_endpoint(&request, "opc.tcp://localhost:4855/", SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + "opc.tcp://localhost:4855/", + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); trace!("result = {:?}", result); 
assert!(result.is_ok()); - let result = server_state.authenticate_endpoint(&request, "opc.tcp://localhost:4855/x", SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + "opc.tcp://localhost:4855/x", + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); trace!("result = {:?}", result); assert_eq!(result.unwrap_err(), StatusCode::BadTcpEndpointUrlInvalid); - let result = server_state.authenticate_endpoint(&request, "opc.tcp://localhost:4855/noaccess", SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + "opc.tcp://localhost:4855/noaccess", + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); trace!("result = {:?}", result); assert_eq!(result.unwrap_err(), StatusCode::BadIdentityTokenRejected); } -fn make_encrypted_user_name_identity_token(policy_id: &str, security_policy: SecurityPolicy, server_nonce: &ByteString, server_cert: &Option, user: &str, pass: &str) -> ExtensionObject { +fn make_encrypted_user_name_identity_token( + policy_id: &str, + security_policy: SecurityPolicy, + server_nonce: &ByteString, + server_cert: &Option, + user: &str, + pass: &str, +) -> ExtensionObject { let user_token_policy = opcua_types::service_types::UserTokenPolicy { policy_id: UAString::from(policy_id), token_type: UserTokenType::UserName, @@ -59,8 +94,19 @@ fn make_encrypted_user_name_identity_token(policy_id: &str, security_policy: Sec issuer_endpoint_url: UAString::null(), security_policy_uri: UAString::null(), }; - let token = make_user_name_identity_token(security_policy, &user_token_policy, server_nonce.as_ref(), server_cert, user, pass).unwrap(); - ExtensionObject::from_encodable(ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, &token) + let token = make_user_name_identity_token( + security_policy, + &user_token_policy, + server_nonce.as_ref(), + server_cert, + user, + pass, + ) + .unwrap(); + ExtensionObject::from_encodable( + ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, + &token, + ) } fn make_unencrypted_user_name_identity_token(user: &str, pass: &str) -> ExtensionObject { @@ -70,14 +116,18 @@ fn make_unencrypted_user_name_identity_token(user: &str, pass: &str) -> Extensio password: ByteString::from(pass.as_bytes()), encryption_algorithm: UAString::null(), }; - ExtensionObject::from_encodable(ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, &token) + ExtensionObject::from_encodable( + ObjectId::UserNameIdentityToken_Encoding_DefaultBinary, + &token, + ) } #[test] fn user_name_pass_token() { let server = ServerBuilder::new_sample() .pki_dir("./pki_user_name_pass_token") - .server().unwrap(); + .server() + .unwrap(); let server_state = server.server_state(); let server_state = server_state.read().unwrap(); @@ -92,50 +142,162 @@ fn user_name_pass_token() { // Test that a good user authenticates in unencrypt and encrypted policies let token = make_unencrypted_user_name_identity_token("sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); assert!(result.is_ok()); - let token = make_encrypted_user_name_identity_token(POLICY_ID_USER_PASS_RSA_15, SecurityPolicy::Basic128Rsa15, 
&server_nonce, &server_cert, "sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::Basic128Rsa15, MessageSecurityMode::SignAndEncrypt, &token, &server_nonce); + let token = make_encrypted_user_name_identity_token( + POLICY_ID_USER_PASS_RSA_15, + SecurityPolicy::Basic128Rsa15, + &server_nonce, + &server_cert, + "sample1", + "sample1pwd", + ); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::Basic128Rsa15, + MessageSecurityMode::SignAndEncrypt, + &token, + &server_nonce, + ); assert!(result.is_ok()); - let token = make_encrypted_user_name_identity_token(POLICY_ID_USER_PASS_RSA_OAEP, SecurityPolicy::Basic256, &server_nonce, &server_cert, "sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::Basic256, MessageSecurityMode::SignAndEncrypt, &token, &server_nonce); + let token = make_encrypted_user_name_identity_token( + POLICY_ID_USER_PASS_RSA_OAEP, + SecurityPolicy::Basic256, + &server_nonce, + &server_cert, + "sample1", + "sample1pwd", + ); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::Basic256, + MessageSecurityMode::SignAndEncrypt, + &token, + &server_nonce, + ); assert!(result.is_ok()); - let token = make_encrypted_user_name_identity_token(POLICY_ID_USER_PASS_RSA_OAEP, SecurityPolicy::Basic256Sha256, &server_nonce, &server_cert, "sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, &token, &server_nonce); + let token = make_encrypted_user_name_identity_token( + POLICY_ID_USER_PASS_RSA_OAEP, + SecurityPolicy::Basic256Sha256, + &server_nonce, + &server_cert, + "sample1", + "sample1pwd", + ); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &token, + &server_nonce, + ); assert!(result.is_ok()); // Invalid tests // Mismatch between security policy and encryption - let token = make_encrypted_user_name_identity_token(POLICY_ID_USER_PASS_RSA_15, SecurityPolicy::Basic256Sha256, &server_nonce, &server_cert, "sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, &token, &server_nonce); + let token = make_encrypted_user_name_identity_token( + POLICY_ID_USER_PASS_RSA_15, + SecurityPolicy::Basic256Sha256, + &server_nonce, + &server_cert, + "sample1", + "sample1pwd", + ); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &token, + &server_nonce, + ); assert_eq!(result.unwrap_err(), StatusCode::BadIdentityTokenInvalid); // No encryption policy when encryption is required - let token = make_encrypted_user_name_identity_token(POLICY_ID_USER_PASS_NONE, SecurityPolicy::Basic128Rsa15, &server_nonce, &server_cert, "sample1", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, &token, &server_nonce); + let token = make_encrypted_user_name_identity_token( + POLICY_ID_USER_PASS_NONE, + SecurityPolicy::Basic128Rsa15, + &server_nonce, + &server_cert, + "sample1", + "sample1pwd", + ); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + 
SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &token, + &server_nonce, + ); assert_eq!(result.unwrap_err(), StatusCode::BadIdentityTokenInvalid); // Invalid user let token = make_unencrypted_user_name_identity_token("samplex", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); // Invalid password let token = make_unencrypted_user_name_identity_token("sample1", "sample"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); // Empty user let token = make_unencrypted_user_name_identity_token("", "sample1pwd"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::None, MessageSecurityMode::None, &token, &server_nonce); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::None, + MessageSecurityMode::None, + &token, + &server_nonce, + ); assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); // Invalid password (encrypted) - let token = make_encrypted_user_name_identity_token(POLICY_ID_USER_PASS_RSA_OAEP, SecurityPolicy::Basic128Rsa15, &server_nonce, &server_cert, "sample1", "samplexx1"); - let result = server_state.authenticate_endpoint(&request, ENDPOINT_URL, SecurityPolicy::Basic256Sha256, MessageSecurityMode::SignAndEncrypt, &token, &server_nonce); + let token = make_encrypted_user_name_identity_token( + POLICY_ID_USER_PASS_RSA_OAEP, + SecurityPolicy::Basic128Rsa15, + &server_nonce, + &server_cert, + "sample1", + "samplexx1", + ); + let result = server_state.authenticate_endpoint( + &request, + ENDPOINT_URL, + SecurityPolicy::Basic256Sha256, + MessageSecurityMode::SignAndEncrypt, + &token, + &server_nonce, + ); assert_eq!(result.unwrap_err(), StatusCode::BadUserAccessDenied); } diff --git a/server/src/tests/services/subscription.rs b/server/src/tests/services/subscription.rs index 30e82bf9b..39808b127 100644 --- a/server/src/tests/services/subscription.rs +++ b/server/src/tests/services/subscription.rs @@ -4,34 +4,52 @@ use chrono::Utc; use crate::{ prelude::*, - services::{ - monitored_item::MonitoredItemService, - subscription::SubscriptionService, - }, + services::{monitored_item::MonitoredItemService, subscription::SubscriptionService}, state::ServerState, subscriptions::subscription::*, }; use super::*; -fn create_subscription(server_state: Arc>, session: Arc>, ss: &SubscriptionService) -> u32 { +fn create_subscription( + server_state: Arc>, + session: Arc>, + ss: &SubscriptionService, +) -> u32 { let request = create_subscription_request(0, 0); debug!("{:#?}", request); - let response: CreateSubscriptionResponse = supported_message_as!(ss.create_subscription(server_state, session, &request), CreateSubscriptionResponse); + let response: CreateSubscriptionResponse = supported_message_as!( + ss.create_subscription(server_state, session, &request), + CreateSubscriptionResponse + ); debug!("{:#?}", response); 
response.subscription_id } -fn create_monitored_item(subscription_id: u32, node_to_monitor: T, server_state: Arc>, session: Arc>, address_space: Arc>, mis: &MonitoredItemService) where T: Into { +fn create_monitored_item( + subscription_id: u32, + node_to_monitor: T, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + mis: &MonitoredItemService, +) where + T: Into, +{ // Create a monitored item let request = create_monitored_items_request(subscription_id, vec![node_to_monitor]); debug!("CreateMonitoredItemsRequest {:#?}", request); - let response: CreateMonitoredItemsResponse = supported_message_as!(mis.create_monitored_items(server_state, session, address_space, &request), CreateMonitoredItemsResponse); + let response: CreateMonitoredItemsResponse = supported_message_as!( + mis.create_monitored_items(server_state, session, address_space, &request), + CreateMonitoredItemsResponse + ); debug!("CreateMonitoredItemsResponse {:#?}", response); // let result = response.results.unwrap()[0].monitored_item_id; } -fn publish_request(subscription_acknowledgements: Option>) -> PublishRequest { +fn publish_request( + subscription_acknowledgements: Option>, +) -> PublishRequest { let request = PublishRequest { request_header: RequestHeader::dummy(), subscription_acknowledgements, @@ -60,11 +78,19 @@ fn create_modify_destroy_subscription() { /// Creates a subscription with the specified keep alive and lifetime values and compares /// the revised values to the expected values. -fn keepalive_test(keep_alive: u32, lifetime: u32, expected_keep_alive: u32, expected_lifetime: u32) { +fn keepalive_test( + keep_alive: u32, + lifetime: u32, + expected_keep_alive: u32, + expected_lifetime: u32, +) { do_subscription_service_test(|server_state, session, _, ss, _| { // Create subscription let request = create_subscription_request(keep_alive, lifetime); - let response: CreateSubscriptionResponse = supported_message_as!(ss.create_subscription(server_state, session, &request), CreateSubscriptionResponse); + let response: CreateSubscriptionResponse = supported_message_as!( + ss.create_subscription(server_state, session, &request), + CreateSubscriptionResponse + ); debug!("{:#?}", response); assert_eq!(response.revised_lifetime_count, expected_lifetime); assert_eq!(response.revised_max_keep_alive_count, expected_keep_alive); @@ -81,17 +107,37 @@ fn test_revised_keep_alive_lifetime_counts() { // Expect defaults to hold true keepalive_test(0, 0, DEFAULT_KEEP_ALIVE_COUNT, DEFAULT_LIFETIME_COUNT); - keepalive_test(0, (DEFAULT_KEEP_ALIVE_COUNT * 3) - 1, DEFAULT_KEEP_ALIVE_COUNT, DEFAULT_LIFETIME_COUNT); + keepalive_test( + 0, + (DEFAULT_KEEP_ALIVE_COUNT * 3) - 1, + DEFAULT_KEEP_ALIVE_COUNT, + DEFAULT_LIFETIME_COUNT, + ); // Expect lifetime to be 3 * keep alive keepalive_test(1, 3, 1, 3); keepalive_test(1, 4, 1, 4); keepalive_test(1, 2, 1, 3); - keepalive_test(DEFAULT_KEEP_ALIVE_COUNT, 2, DEFAULT_KEEP_ALIVE_COUNT, DEFAULT_LIFETIME_COUNT); + keepalive_test( + DEFAULT_KEEP_ALIVE_COUNT, + 2, + DEFAULT_KEEP_ALIVE_COUNT, + DEFAULT_LIFETIME_COUNT, + ); // Expect max values to be honoured - keepalive_test(MAX_KEEP_ALIVE_COUNT, 0, MAX_KEEP_ALIVE_COUNT, MAX_LIFETIME_COUNT); - keepalive_test(MAX_KEEP_ALIVE_COUNT + 1, 0, MAX_KEEP_ALIVE_COUNT, MAX_LIFETIME_COUNT); + keepalive_test( + MAX_KEEP_ALIVE_COUNT, + 0, + MAX_KEEP_ALIVE_COUNT, + MAX_LIFETIME_COUNT, + ); + keepalive_test( + MAX_KEEP_ALIVE_COUNT + 1, + 0, + MAX_KEEP_ALIVE_COUNT, + MAX_LIFETIME_COUNT, + ); } #[test] @@ -100,9 +146,14 @@ fn publish_with_no_subscriptions() { 
let request = publish_request(None); // Publish and expect a service fault BadNoSubscription let request_id = 1001; - let response = ss.async_publish(&Utc::now(), session, address_space, request_id, &request).unwrap(); + let response = ss + .async_publish(&Utc::now(), session, address_space, request_id, &request) + .unwrap(); let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNoSubscription); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNoSubscription + ); }) } @@ -115,12 +166,23 @@ fn publish_response_subscription() { let now = Utc::now(); // Create a monitored item - create_monitored_item(subscription_id, VariableId::Server_ServerStatus_StartTime, server_state.clone(), session.clone(), address_space.clone(), &mis); + create_monitored_item( + subscription_id, + VariableId::Server_ServerStatus_StartTime, + server_state.clone(), + session.clone(), + address_space.clone(), + &mis, + ); // Put the subscription into normal state { let mut session = trace_write_lock_unwrap!(session); - session.subscriptions_mut().get_mut(subscription_id).unwrap().set_state(SubscriptionState::Normal); + session + .subscriptions_mut() + .get_mut(subscription_id) + .unwrap() + .set_state(SubscriptionState::Normal); } // Send a publish and expect a publish response containing the subscription @@ -128,7 +190,13 @@ fn publish_response_subscription() { let request_id = 1001; let request = publish_request(None); // Tick subscriptions to trigger a change - let _ = ss.async_publish(&now, session.clone(), address_space.clone(), request_id, &request); + let _ = ss.async_publish( + &now, + session.clone(), + address_space.clone(), + request_id, + &request, + ); let now = now.add(chrono::Duration::seconds(2)); let mut session = trace_write_lock_unwrap!(session); @@ -137,10 +205,18 @@ fn publish_response_subscription() { // Ensure publish request was processed into a publish response assert_eq!(session.subscriptions_mut().publish_request_queue().len(), 0); - assert_eq!(session.subscriptions_mut().publish_response_queue().len(), 1); + assert_eq!( + session.subscriptions_mut().publish_response_queue().len(), + 1 + ); // Get the response from the queue - let response = session.subscriptions_mut().publish_response_queue().pop_back().unwrap().response; + let response = session + .subscriptions_mut() + .publish_response_queue() + .pop_back() + .unwrap() + .response; let response: PublishResponse = supported_message_as!(response, PublishResponse); debug!("PublishResponse {:#?}", response); @@ -161,7 +237,9 @@ fn publish_response_subscription() { // We expect the notification to contain one data change notification referring to // the monitored item. 
let decoding_limits = DecodingLimits::default(); - let data_change = notification_data[0].decode_inner::(&decoding_limits).unwrap(); + let data_change = notification_data[0] + .decode_inner::(&decoding_limits) + .unwrap(); assert!(data_change.monitored_items.is_some()); let monitored_items = data_change.monitored_items.unwrap(); assert_eq!(monitored_items.len(), 1); @@ -172,7 +250,10 @@ fn publish_response_subscription() { // We expect the queue to be empty, because we got an immediate response let mut session = trace_write_lock_unwrap!(session); - assert!(session.subscriptions_mut().publish_response_queue().is_empty()); + assert!(session + .subscriptions_mut() + .publish_response_queue() + .is_empty()); }) } @@ -184,12 +265,18 @@ fn publish_keep_alive() { // Create a monitored item { - let request = create_monitored_items_request(subscription_id, vec![ - (1, "v1"), - (1, "v1"), - ]); + let request = + create_monitored_items_request(subscription_id, vec![(1, "v1"), (1, "v1")]); debug!("CreateMonitoredItemsRequest {:#?}", request); - let response: CreateMonitoredItemsResponse = supported_message_as!(mis.create_monitored_items(server_state.clone(), session.clone(), address_space.clone(), &request), CreateMonitoredItemsResponse); + let response: CreateMonitoredItemsResponse = supported_message_as!( + mis.create_monitored_items( + server_state.clone(), + session.clone(), + address_space.clone(), + &request + ), + CreateMonitoredItemsResponse + ); debug!("CreateMonitoredItemsResponse {:#?}", response); // let result = response.results.unwrap()[0].monitored_item_id; } @@ -197,7 +284,10 @@ fn publish_keep_alive() { // Disable publishing to force a keep-alive { let mut session = trace_write_lock_unwrap!(session); - let subscription = session.subscriptions_mut().get_mut(subscription_id).unwrap(); + let subscription = session + .subscriptions_mut() + .get_mut(subscription_id) + .unwrap(); subscription.set_state(SubscriptionState::Normal); subscription.set_publishing_enabled(false); } @@ -209,13 +299,22 @@ fn publish_keep_alive() { let now = Utc::now(); // Don't expect a response right away - let response = ss.async_publish(&now, session.clone(), address_space.clone(), request_id, &request); + let response = ss.async_publish( + &now, + session.clone(), + address_space.clone(), + request_id, + &request, + ); assert!(response.is_none()); let mut session = trace_write_lock_unwrap!(session); let address_space = trace_read_lock_unwrap!(address_space); - assert!(!session.subscriptions_mut().publish_request_queue().is_empty()); + assert!(!session + .subscriptions_mut() + .publish_request_queue() + .is_empty()); // Tick subscriptions to trigger a change let now = now.add(chrono::Duration::seconds(2)); @@ -224,10 +323,18 @@ fn publish_keep_alive() { // Ensure publish request was processed into a publish response assert_eq!(session.subscriptions_mut().publish_request_queue().len(), 0); - assert_eq!(session.subscriptions_mut().publish_response_queue().len(), 1); + assert_eq!( + session.subscriptions_mut().publish_response_queue().len(), + 1 + ); // Get the response from the queue - let response = session.subscriptions_mut().publish_response_queue().pop_back().unwrap().response; + let response = session + .subscriptions_mut() + .publish_response_queue() + .pop_back() + .unwrap() + .response; let response: PublishResponse = supported_message_as!(response, PublishResponse); debug!("PublishResponse {:#?}", response); @@ -254,7 +361,13 @@ fn multiple_publish_response_subscription() { // Send a publish and expect 
nothing let request = publish_request(None); - let response = ss.async_publish(&now, session.clone(), address_space.clone(), request_id, &request); + let response = ss.async_publish( + &now, + session.clone(), + address_space.clone(), + request_id, + &request, + ); assert!(response.is_none()); // TODO Tick a change @@ -277,7 +390,13 @@ fn acknowledge_unknown_sequence_nr() { sequence_number: 10001, }; let request = publish_request(Some(vec![ack])); - let _response = ss.async_publish(&now, session.clone(), address_space.clone(), request_id, &request); + let _response = ss.async_publish( + &now, + session.clone(), + address_space.clone(), + request_id, + &request, + ); // TODO //unimplemented!(); @@ -296,10 +415,18 @@ fn republish() { client_handle: 1, value: Variant::Empty.into(), }]; - let notification = NotificationMessage::data_change(1, DateTime::now(), monitored_item_notifications, vec![]); + let notification = NotificationMessage::data_change( + 1, + DateTime::now(), + monitored_item_notifications, + vec![], + ); let sequence_number = notification.sequence_number; let mut session = trace_write_lock_unwrap!(session); - session.subscriptions_mut().retransmission_queue().insert((subscription_id, notification.sequence_number), notification); + session.subscriptions_mut().retransmission_queue().insert( + (subscription_id, notification.sequence_number), + notification, + ); sequence_number }; @@ -312,12 +439,20 @@ fn republish() { // try for a subscription id that does not exist, expect service fault let request = republish_request(subscription_id + 1, sequence_number); - let response: ServiceFault = supported_message_as!(ss.republish(session.clone(), &request), ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadSubscriptionIdInvalid); + let response: ServiceFault = + supported_message_as!(ss.republish(session.clone(), &request), ServiceFault); + assert_eq!( + response.response_header.service_result, + StatusCode::BadSubscriptionIdInvalid + ); // try for a sequence nr that does not exist let request = republish_request(subscription_id, sequence_number + 1); - let response: ServiceFault = supported_message_as!(ss.republish(session.clone(), &request), ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadMessageNotAvailable); + let response: ServiceFault = + supported_message_as!(ss.republish(session.clone(), &request), ServiceFault); + assert_eq!( + response.response_header.service_result, + StatusCode::BadMessageNotAvailable + ); }) -} \ No newline at end of file +} diff --git a/server/src/tests/services/view.rs b/server/src/tests/services/view.rs index d119a52e6..b43d6ff18 100644 --- a/server/src/tests/services/view.rs +++ b/server/src/tests/services/view.rs @@ -8,20 +8,28 @@ use super::*; // View service tests -fn make_browse_request(nodes: &[NodeId], node_class_mask: NodeClassMask, max_references_per_node: usize, browse_direction: BrowseDirection, reference_type: T) -> BrowseRequest - where T: Into + Clone +fn make_browse_request( + nodes: &[NodeId], + node_class_mask: NodeClassMask, + max_references_per_node: usize, + browse_direction: BrowseDirection, + reference_type: T, +) -> BrowseRequest +where + T: Into + Clone, { let request_header = make_request_header(); - let nodes_to_browse = nodes.iter().map(|n| { - BrowseDescription { + let nodes_to_browse = nodes + .iter() + .map(|n| BrowseDescription { node_id: n.clone(), browse_direction, reference_type_id: reference_type.clone().into(), include_subtypes: true, 
node_class_mask: node_class_mask.bits(), result_mask: BrowseDescriptionResultMask::all().bits() as u32, - } - }).collect(); + }) + .collect(); BrowseRequest { request_header, view: ViewDescription { @@ -34,16 +42,27 @@ fn make_browse_request(nodes: &[NodeId], node_class_mask: NodeClassMask, max_ } } -fn make_browse_next_request(continuation_point: &ByteString, release_continuation_points: bool) -> BrowseNextRequest { +fn make_browse_next_request( + continuation_point: &ByteString, + release_continuation_points: bool, +) -> BrowseNextRequest { let request_header = make_request_header(); BrowseNextRequest { request_header, release_continuation_points, - continuation_points: if continuation_point.is_null() { None } else { Some(vec![continuation_point.clone()]) }, + continuation_points: if continuation_point.is_null() { + None + } else { + Some(vec![continuation_point.clone()]) + }, } } -fn verify_references_to_many_vars(references: &[ReferenceDescription], expected_size: usize, start_idx: usize) { +fn verify_references_to_many_vars( + references: &[ReferenceDescription], + expected_size: usize, + start_idx: usize, +) { // Verify that the reference descriptions point at sequential vars assert_eq!(references.len(), expected_size); for (i, r) in references.iter().enumerate() { @@ -52,20 +71,51 @@ fn verify_references_to_many_vars(references: &[ReferenceDescription], expected_ } fn do_view_service_test(f: F) - where F: FnOnce(Arc>, Arc>, Arc>, &ViewService) +where + F: FnOnce( + Arc>, + Arc>, + Arc>, + &ViewService, + ), { opcua_console_logging::init(); let st = ServiceTest::new(); - f(st.server_state.clone(), st.session.clone(), st.address_space.clone(), &ViewService::new()); + f( + st.server_state.clone(), + st.session.clone(), + st.address_space.clone(), + &ViewService::new(), + ); } -fn do_browse(vs: &ViewService, server_state: Arc>, session: Arc>, address_space: Arc>, nodes: &[NodeId], max_references_per_node: usize, browse_direction: BrowseDirection) -> BrowseResponse { - let request = make_browse_request(nodes, NodeClassMask::empty(), max_references_per_node, browse_direction, ReferenceTypeId::Organizes); +fn do_browse( + vs: &ViewService, + server_state: Arc>, + session: Arc>, + address_space: Arc>, + nodes: &[NodeId], + max_references_per_node: usize, + browse_direction: BrowseDirection, +) -> BrowseResponse { + let request = make_browse_request( + nodes, + NodeClassMask::empty(), + max_references_per_node, + browse_direction, + ReferenceTypeId::Organizes, + ); let response = vs.browse(server_state, session, address_space, &request); supported_message_as!(response, BrowseResponse) } -fn do_browse_next(vs: &ViewService, session: Arc>, address_space: Arc>, continuation_point: &ByteString, release_continuation_points: bool) -> BrowseNextResponse { +fn do_browse_next( + vs: &ViewService, + session: Arc>, + address_space: Arc>, + continuation_point: &ByteString, + release_continuation_points: bool, +) -> BrowseNextResponse { let request = make_browse_next_request(continuation_point, release_continuation_points); let response = vs.browse_next(session, address_space, &request); supported_message_as!(response, BrowseNextResponse) @@ -77,7 +127,15 @@ fn browse() { add_sample_vars_to_address_space(address_space.clone()); let nodes: Vec = vec![ObjectId::RootFolder.into()]; - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 1000, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + 
&nodes, + 1000, + BrowseDirection::Forward, + ); assert!(response.results.is_some()); let results = response.results.unwrap(); @@ -108,18 +166,35 @@ fn browse_non_null_view() { let nodes: Vec = vec![ObjectId::RootFolder.into()]; // Expect a non-null view to be rejected - let mut request = make_browse_request(&nodes, NodeClassMask::empty(), 1000, BrowseDirection::Forward, ReferenceTypeId::Organizes); + let mut request = make_browse_request( + &nodes, + NodeClassMask::empty(), + 1000, + BrowseDirection::Forward, + ReferenceTypeId::Organizes, + ); request.view.view_id = NodeId::new(1, "FakeView"); - let response = vs.browse(server_state.clone(), session.clone(), address_space.clone(), &request); + let response = vs.browse( + server_state.clone(), + session.clone(), + address_space.clone(), + &request, + ); let response = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadViewIdUnknown); + assert_eq!( + response.response_header.service_result, + StatusCode::BadViewIdUnknown + ); // Expect a non-0 timestamp to be rejected request.view.view_id = NodeId::null(); request.view.timestamp = DateTime::now(); let response = vs.browse(server_state, session, address_space, &request); let response = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadViewIdUnknown); + assert_eq!( + response.response_header.service_result, + StatusCode::BadViewIdUnknown + ); }); } @@ -130,7 +205,13 @@ fn browse_node_class_mask() { add_sample_vars_to_address_space(address_space.clone()); let nodes: Vec = vec![ObjectId::Server.into()]; - let request = make_browse_request(&nodes, NodeClassMask::OBJECT, 1000, BrowseDirection::Forward, ReferenceTypeId::HasComponent); + let request = make_browse_request( + &nodes, + NodeClassMask::OBJECT, + 1000, + BrowseDirection::Forward, + ReferenceTypeId::HasComponent, + ); let response = vs.browse(server_state, session, address_space, &request); let response = supported_message_as!(response, BrowseResponse); @@ -147,16 +228,26 @@ fn browse_node_class_mask() { }); } -fn verify_references(expected: &[(ReferenceTypeId, NodeId, bool)], references: &[ReferenceDescription]) { +fn verify_references( + expected: &[(ReferenceTypeId, NodeId, bool)], + references: &[ReferenceDescription], +) { if expected.len() != references.len() { debug!("Check expected references to this actual list:"); expected.iter().for_each(|r| { let reference_type_id: NodeId = r.0.into(); let node_id: NodeId = r.1.clone(); let is_forward = r.2; - let found = references.iter().any(|r| r.reference_type_id == reference_type_id && r.node_id.node_id == node_id && r.is_forward == is_forward); + let found = references.iter().any(|r| { + r.reference_type_id == reference_type_id + && r.node_id.node_id == node_id + && r.is_forward == is_forward + }); if !found { - debug!(" Missing expected ({:?}, {:?}, {:?}),", r.0, node_id, is_forward); + debug!( + " Missing expected ({:?}, {:?}, {:?}),", + r.0, node_id, is_forward + ); } }); references.iter().for_each(|r| { @@ -164,10 +255,15 @@ fn verify_references(expected: &[(ReferenceTypeId, NodeId, bool)], references: & let reference_type_id: NodeId = e.0.into(); let node_id: NodeId = e.1.clone(); let is_forward = e.2; - r.reference_type_id == reference_type_id && r.node_id.node_id == node_id && r.is_forward == is_forward + r.reference_type_id == reference_type_id + && r.node_id.node_id == node_id + && r.is_forward == is_forward }); if !found { - debug!(" Surplus ({:?}, 
{:?}, {:?}),", r.reference_type_id, r.node_id.node_id, r.is_forward); + debug!( + " Surplus ({:?}, {:?}, {:?}),", + r.reference_type_id, r.node_id.node_id, r.is_forward + ); } }); } @@ -177,7 +273,11 @@ fn verify_references(expected: &[(ReferenceTypeId, NodeId, bool)], references: & let reference_type_id: NodeId = e.0.into(); let node_id: NodeId = e.1.clone(); let is_forward = e.2; - let reference = references.iter().find(|r| r.reference_type_id == reference_type_id && r.node_id.node_id == node_id && r.is_forward == is_forward); + let reference = references.iter().find(|r| { + r.reference_type_id == reference_type_id + && r.node_id.node_id == node_id + && r.is_forward == is_forward + }); assert!(reference.is_some()); }); } @@ -191,7 +291,13 @@ fn browse_inverse() { let node_id: NodeId = ObjectTypeId::FolderType.into(); let nodes = vec![node_id.clone()]; - let request = make_browse_request(&nodes, NodeClassMask::empty(), 1000, BrowseDirection::Inverse, NodeId::null()); + let request = make_browse_request( + &nodes, + NodeClassMask::empty(), + 1000, + BrowseDirection::Inverse, + NodeId::null(), + ); let response = vs.browse(server_state, session, address_space, &request); let response = supported_message_as!(response, BrowseResponse); @@ -202,7 +308,10 @@ fn browse_inverse() { let references = results.get(0).unwrap().references.as_ref().unwrap(); // We do NOT expect to find the node in the list of results - assert!(references.iter().find(|r| r.node_id.node_id == node_id).is_none()); + assert!(references + .iter() + .find(|r| r.node_id.node_id == node_id) + .is_none()); // We expect this many results assert_eq!(references.len(), 21); @@ -210,27 +319,111 @@ fn browse_inverse() { let expected: Vec<(ReferenceTypeId, NodeId, bool)> = vec![ // (ref_type, node_id, is_forward) // Inverse refs - (ReferenceTypeId::HasTypeDefinition, ObjectId::HistoryServerCapabilitiesType_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ObjectTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::DataTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerType_ServerCapabilities_ModellingRules.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), false), - (ReferenceTypeId::HasSubtype, ObjectTypeId::BaseObjectType.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerCapabilitiesType_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::Server_ServerCapabilities_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::TypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerCapabilitiesType_ModellingRules.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ObjectsFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::VariableTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::RootFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerType_ServerCapabilities_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ViewsFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::EventTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::Server_ServerCapabilities_ModellingRules.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ReferenceTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, 
ObjectId::HistoricalDataConfigurationType_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::InterfaceTypes.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::AuthorizationServices.into(), false), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::HistoryServerCapabilitiesType_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ObjectTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::DataTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerType_ServerCapabilities_ModellingRules.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::BaseObjectType.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerCapabilitiesType_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::Server_ServerCapabilities_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::TypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerCapabilitiesType_ModellingRules.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ObjectsFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::VariableTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::RootFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerType_ServerCapabilities_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ViewsFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::EventTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::Server_ServerCapabilities_ModellingRules.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ReferenceTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::HistoricalDataConfigurationType_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::InterfaceTypes.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::AuthorizationServices.into(), + false, + ), ]; verify_references(&expected, references); }); @@ -245,7 +438,13 @@ fn browse_both() { let node_id: NodeId = ObjectTypeId::FolderType.into(); let nodes = vec![node_id.clone()]; - let request = make_browse_request(&nodes, NodeClassMask::empty(), 1000, BrowseDirection::Both, NodeId::null()); + let request = make_browse_request( + &nodes, + NodeClassMask::empty(), + 1000, + BrowseDirection::Both, + NodeId::null(), + ); let response = vs.browse(server_state, session, address_space, &request); let response = supported_message_as!(response, BrowseResponse); @@ -256,7 +455,10 @@ fn browse_both() { let references = results.get(0).unwrap().references.as_ref().unwrap(); // We do NOT expect to find the node in the list of results - assert!(references.iter().find(|r| r.node_id.node_id == node_id).is_none()); + assert!(references + .iter() + .find(|r| r.node_id.node_id == node_id) + .is_none()); // We expect this many results assert_eq!(references.len(), 29); @@ -264,36 +466,152 @@ fn browse_both() { let expected: Vec<(ReferenceTypeId, NodeId, bool)> = vec![ // (ref_type, node_id, is_forward) // Forward refs - 
(ReferenceTypeId::HasSubtype, ObjectTypeId::OperationLimitsType.into(), true), - (ReferenceTypeId::HasSubtype, ObjectTypeId::FileDirectoryType.into(), true), - (ReferenceTypeId::HasSubtype, ObjectTypeId::CertificateGroupFolderType.into(), true), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::OperationLimitsType.into(), + true, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::FileDirectoryType.into(), + true, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::CertificateGroupFolderType.into(), + true, + ), // Inverse refs - (ReferenceTypeId::HasTypeDefinition, ObjectId::HistoryServerCapabilitiesType_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ObjectTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::DataTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerType_ServerCapabilities_ModellingRules.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), false), - (ReferenceTypeId::HasSubtype, ObjectTypeId::BaseObjectType.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerCapabilitiesType_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::Server_ServerCapabilities_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::TypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerCapabilitiesType_ModellingRules.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ObjectsFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::VariableTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::RootFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ServerType_ServerCapabilities_AggregateFunctions.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ViewsFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::EventTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::Server_ServerCapabilities_ModellingRules.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::ReferenceTypesFolder.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::HistoricalDataConfigurationType_AggregateFunctions.into(), false), - (ReferenceTypeId::HasSubtype, ObjectTypeId::DictionaryFolderType.into(), true), - (ReferenceTypeId::HasSubtype, ObjectTypeId::AlarmGroupType.into(), true), - (ReferenceTypeId::HasSubtype, ObjectTypeId::KeyCredentialConfigurationFolderType.into(), true), - (ReferenceTypeId::HasSubtype, ObjectTypeId::SecurityGroupFolderType.into(), true), - (ReferenceTypeId::HasSubtype, ObjectTypeId::DataSetFolderType.into(), true), - (ReferenceTypeId::HasTypeDefinition, ObjectId::InterfaceTypes.into(), false), - (ReferenceTypeId::HasTypeDefinition, ObjectId::AuthorizationServices.into(), false), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::HistoryServerCapabilitiesType_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ObjectTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::DataTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerType_ServerCapabilities_ModellingRules.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::HistoryServerCapabilities_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::BaseObjectType.into(), + 
false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerCapabilitiesType_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::Server_ServerCapabilities_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::TypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerCapabilitiesType_ModellingRules.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ObjectsFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::VariableTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::RootFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ServerType_ServerCapabilities_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ViewsFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::EventTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::Server_ServerCapabilities_ModellingRules.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::ReferenceTypesFolder.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::HistoricalDataConfigurationType_AggregateFunctions.into(), + false, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::DictionaryFolderType.into(), + true, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::AlarmGroupType.into(), + true, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::KeyCredentialConfigurationFolderType.into(), + true, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::SecurityGroupFolderType.into(), + true, + ), + ( + ReferenceTypeId::HasSubtype, + ObjectTypeId::DataSetFolderType.into(), + true, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::InterfaceTypes.into(), + false, + ), + ( + ReferenceTypeId::HasTypeDefinition, + ObjectId::AuthorizationServices.into(), + false, + ), ]; verify_references(&expected, references); }); @@ -305,7 +623,15 @@ fn browse_next_no_cp1() { let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; let nodes = vec![parent_node_id.clone()]; // Browse with requested_max_references_per_node = 101, expect 100 results, no continuation point - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 101, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + &nodes, + 101, + BrowseDirection::Forward, + ); assert!(response.results.is_some()); let r1 = &response.results.unwrap()[0]; let references = r1.references.as_ref().unwrap(); @@ -320,7 +646,15 @@ fn browse_next_no_cp2() { let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; let nodes = vec![parent_node_id.clone()]; // Browse with requested_max_references_per_node = 100, expect 100 results, no continuation point - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 100, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + &nodes, + 100, + BrowseDirection::Forward, + ); let r1 = &response.results.unwrap()[0]; let references = r1.references.as_ref().unwrap(); assert!(r1.continuation_point.is_null()); @@ -336,21 +670,41 @@ fn browse_next_cp() { let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; 
let nodes = vec![parent_node_id.clone()]; // Get first 99 - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 99, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + &nodes, + 99, + BrowseDirection::Forward, + ); let r1 = &response.results.unwrap()[0]; let references = r1.references.as_ref().unwrap(); assert!(!r1.continuation_point.is_null()); verify_references_to_many_vars(references, 99, 0); // Expect continuation point and browse next to return last var and no more continuation point - let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r1.continuation_point, false); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r1.continuation_point, + false, + ); let r2 = &response.results.unwrap()[0]; assert!(r2.continuation_point.is_null()); let references = r2.references.as_ref().unwrap(); verify_references_to_many_vars(references, 1, 99); // Browse next again with same continuation point, expect failure - let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r1.continuation_point, false); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r1.continuation_point, + false, + ); let r2 = &response.results.unwrap()[0]; assert!(r2.continuation_point.is_null()); assert_eq!(r2.status_code, StatusCode::BadContinuationPointInvalid); @@ -364,17 +718,37 @@ fn browse_next_release_cp() { let parent_node_id = add_many_vars_to_address_space(address_space.clone(), 100).0; let nodes = vec![parent_node_id.clone()]; // Get first 99 - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 99, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + &nodes, + 99, + BrowseDirection::Forward, + ); let r1 = &response.results.unwrap()[0]; let _references = r1.references.as_ref().unwrap(); assert!(!r1.continuation_point.is_null()); // Browse next and release the previous continuation points, expect Null result - let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r1.continuation_point, true); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r1.continuation_point, + true, + ); assert!(response.results.is_none()); // Browse next again with same continuation point, expect BadContinuationPointInvalid - let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r1.continuation_point, false); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r1.continuation_point, + false, + ); let r1 = &response.results.unwrap()[0]; assert_eq!(r1.status_code, StatusCode::BadContinuationPointInvalid); }); @@ -390,21 +764,41 @@ fn browse_next_multiple_cps() { // Browse next with cp1 with 35 expect cp2 // Browse next with cp2 expect 30 results // Get first 35 - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 35, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + &nodes, + 35, + BrowseDirection::Forward, + ); let r1 = &response.results.unwrap()[0]; let references = r1.references.as_ref().unwrap(); assert!(!r1.continuation_point.is_null()); verify_references_to_many_vars(references, 35, 0); // Expect continuation point and browse next to return last var and no more continuation point - 
let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r1.continuation_point, false); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r1.continuation_point, + false, + ); let r2 = &response.results.unwrap()[0]; assert!(!r2.continuation_point.is_null()); let references = r2.references.as_ref().unwrap(); verify_references_to_many_vars(references, 35, 35); // Expect continuation point and browse next to return last var and no more continuation point - let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r2.continuation_point, false); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r2.continuation_point, + false, + ); let r3 = &response.results.unwrap()[0]; assert!(r3.continuation_point.is_null()); let references = r3.references.as_ref().unwrap(); @@ -423,7 +817,15 @@ fn browse_next_modify_address_space() { use std::thread; use std::time::Duration; - let response = do_browse(&vs, server_state, session.clone(), address_space.clone(), &nodes, 99, BrowseDirection::Forward); + let response = do_browse( + &vs, + server_state, + session.clone(), + address_space.clone(), + &nodes, + 99, + BrowseDirection::Forward, + ); let r1 = &response.results.unwrap()[0]; let _references = r1.references.as_ref().unwrap(); assert!(!r1.continuation_point.is_null()); @@ -441,7 +843,13 @@ fn browse_next_modify_address_space() { } // Browsing with the old continuation point should fail - let response = do_browse_next(&vs, session.clone(), address_space.clone(), &r1.continuation_point, false); + let response = do_browse_next( + &vs, + session.clone(), + address_space.clone(), + &r1.continuation_point, + false, + ); let r1 = &response.results.unwrap()[0]; assert_eq!(r1.status_code, StatusCode::BadContinuationPointInvalid); }); @@ -453,21 +861,17 @@ fn translate_browse_paths_to_node_ids() { // This is a very basic test of this service. 
It wants to find the relative path from root to the // Objects folder and ensure that it comes back in the result - let browse_paths = vec![ - BrowsePath { - starting_node: ObjectId::RootFolder.into(), - relative_path: RelativePath { - elements: Some(vec![ - RelativePathElement { - reference_type_id: ReferenceTypeId::Organizes.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, "Objects"), - } - ]), - }, - } - ]; + let browse_paths = vec![BrowsePath { + starting_node: ObjectId::RootFolder.into(), + relative_path: RelativePath { + elements: Some(vec![RelativePathElement { + reference_type_id: ReferenceTypeId::Organizes.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(0, "Objects"), + }]), + }, + }]; let request = TranslateBrowsePathsToNodeIdsRequest { request_header: make_request_header(), @@ -475,7 +879,8 @@ fn translate_browse_paths_to_node_ids() { }; let response = vs.translate_browse_paths_to_node_ids(server_state, address_space, &request); - let response: TranslateBrowsePathsToNodeIdsResponse = supported_message_as!(response, TranslateBrowsePathsToNodeIdsResponse); + let response: TranslateBrowsePathsToNodeIdsResponse = + supported_message_as!(response, TranslateBrowsePathsToNodeIdsResponse); debug!("result = {:#?}", response); @@ -492,7 +897,6 @@ fn translate_browse_paths_to_node_ids() { #[test] fn translate_browse_paths_to_node_ids2() { do_view_service_test(|server_state, _session, address_space, vs| { - // Inputs and outputs taken from this testcase in Node OPCUA // // https://github.com/node-opcua/node-opcua/blob/68b1b57dec23a45148468fbea89ab71a39f9042f/test/end_to_end/u_test_e2e_translateBrowsePath.js @@ -507,12 +911,17 @@ fn translate_browse_paths_to_node_ids2() { "/Objects/Server.ServerStatus.BuildInfo.", "/Objects.Server", "/Objects/2:MatrikonOPC Simulation Server (DA)", - ].iter().map(|path| - BrowsePath { - starting_node: starting_node.clone(), - relative_path: RelativePath::from_str(path, &RelativePathElement::default_node_resolver).unwrap(), - } - ).collect::>(); + ] + .iter() + .map(|path| BrowsePath { + starting_node: starting_node.clone(), + relative_path: RelativePath::from_str( + path, + &RelativePathElement::default_node_resolver, + ) + .unwrap(), + }) + .collect::>(); let request = TranslateBrowsePathsToNodeIdsRequest { request_header: make_request_header(), @@ -522,7 +931,8 @@ fn translate_browse_paths_to_node_ids2() { let browse_paths_len = request.browse_paths.as_ref().unwrap().len(); let response = vs.translate_browse_paths_to_node_ids(server_state, address_space, &request); - let response: TranslateBrowsePathsToNodeIdsResponse = supported_message_as!(response, TranslateBrowsePathsToNodeIdsResponse); + let response: TranslateBrowsePathsToNodeIdsResponse = + supported_message_as!(response, TranslateBrowsePathsToNodeIdsResponse); let results = response.results.unwrap(); assert_eq!(results.len(), browse_paths_len); @@ -547,7 +957,10 @@ fn translate_browse_paths_to_node_ids2() { let targets = r.targets.as_ref().unwrap(); trace!("targets for {} = {:#?}", idx, targets); assert_eq!(targets.len(), 1); - assert_eq!(&targets[0].target_id, &VariableId::Server_ServerStatus.into()); + assert_eq!( + &targets[0].target_id, + &VariableId::Server_ServerStatus.into() + ); idx += 1; } @@ -558,7 +971,10 @@ fn translate_browse_paths_to_node_ids2() { let targets = r.targets.as_ref().unwrap(); trace!("targets for {} = {:#?}", idx, targets); assert_eq!(targets.len(), 1); - assert_eq!(&targets[0].target_id, 
&VariableId::Server_ServerStatus_BuildInfo.into()); + assert_eq!( + &targets[0].target_id, + &VariableId::Server_ServerStatus_BuildInfo.into() + ); idx += 1; } @@ -568,7 +984,10 @@ fn translate_browse_paths_to_node_ids2() { assert!(r.status_code.is_good()); let targets = r.targets.as_ref().unwrap(); trace!("targets for {} = {:#?}", idx, targets); - assert_eq!(&targets[0].target_id, &VariableId::Server_ServerStatus_BuildInfo_ProductName.into()); + assert_eq!( + &targets[0].target_id, + &VariableId::Server_ServerStatus_BuildInfo_ProductName.into() + ); idx += 1; } @@ -596,11 +1015,15 @@ fn translate_browse_paths_to_node_ids2() { } struct RegisterNodesImpl { - pub session: Weak<RwLock<Session>> + pub session: Weak<RwLock<Session>>, } impl RegisterNodes for RegisterNodesImpl { - fn register_nodes(&mut self, session: Arc<RwLock<Session>>, nodes_to_register: &[NodeId]) -> Result<Vec<NodeId>, StatusCode> { + fn register_nodes( + &mut self, + session: Arc<RwLock<Session>>, + nodes_to_register: &[NodeId], + ) -> Result<Vec<NodeId>, StatusCode> { let bad_node = ObjectId::ObjectsFolder.into(); let good_node = NodeId::new(1, 100); let alias_node = NodeId::new(1, 200); @@ -613,13 +1036,11 @@ impl RegisterNodes for RegisterNodesImpl { // The result will be the input except for the good node which will be aliased on its // way out. - let result = nodes_to_register.iter().map(|n| { - if *n == good_node { - &alias_node - } else { - n - } - }).cloned().collect(); + let result = nodes_to_register + .iter() + .map(|n| if *n == good_node { &alias_node } else { n }) + .cloned() + .collect(); Ok(result) } } @@ -628,8 +1049,11 @@ impl RegisterNodes for RegisterNodesImpl { struct UnregisterNodesImpl; impl UnregisterNodes for UnregisterNodesImpl { - fn unregister_nodes(&mut self, _session: Arc<RwLock<Session>>, _nodes_to_unregister: &[NodeId]) -> Result<(), StatusCode> - { + fn unregister_nodes( + &mut self, + _session: Arc<RwLock<Session>>, + _nodes_to_unregister: &[NodeId], + ) -> Result<(), StatusCode> { Ok(()) } } @@ -638,12 +1062,19 @@ impl UnregisterNodes for UnregisterNodesImpl { fn register_nodes_nothing_to_do() { do_view_service_test(|server_state, session, _address_space, vs| { // Empty request - let response = vs.register_nodes(server_state, session, &RegisterNodesRequest { - request_header: make_request_header(), - nodes_to_register: None, - }); + let response = vs.register_nodes( + server_state, + session, + &RegisterNodesRequest { + request_header: make_request_header(), + nodes_to_register: None, + }, + ); let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); }); } @@ -651,13 +1082,16 @@ fn register_nodes_nothing_to_do() { fn register_nodes_no_handler() { do_view_service_test(|server_state, session, _address_space, vs| { // Invalid request because impl has no registered handler - let response = vs.register_nodes(server_state, session, &RegisterNodesRequest { - request_header: make_request_header(), - nodes_to_register: Some(vec![ - ObjectId::ObjectsFolder.into() - ]), - }); - let response: RegisterNodesResponse = supported_message_as!(response, RegisterNodesResponse); + let response = vs.register_nodes( + server_state, + session, + &RegisterNodesRequest { + request_header: make_request_header(), + nodes_to_register: Some(vec![ObjectId::ObjectsFolder.into()]), + }, + ); + let response: RegisterNodesResponse = + supported_message_as!(response, RegisterNodesResponse); let registered_node_ids = 
response.registered_node_ids.unwrap(); // The middle node should be aliased assert_eq!(registered_node_ids[0], ObjectId::ObjectsFolder.into()); @@ -671,21 +1105,28 @@ fn register_nodes() { { let mut server_state = trace_write_lock_unwrap!(server_state); server_state.set_register_nodes_callbacks( - Box::new(RegisterNodesImpl { session: Weak::new() }), + Box::new(RegisterNodesImpl { + session: Weak::new(), + }), Box::new(UnregisterNodesImpl {}), ); } // Make a good call to register - let response = vs.register_nodes(server_state, session, &RegisterNodesRequest { - request_header: make_request_header(), - nodes_to_register: Some(vec![ - NodeId::new(1, 99), - NodeId::new(1, 100), - NodeId::new(1, 101), - ]), - }); - let response: RegisterNodesResponse = supported_message_as!(response, RegisterNodesResponse); + let response = vs.register_nodes( + server_state, + session, + &RegisterNodesRequest { + request_header: make_request_header(), + nodes_to_register: Some(vec![ + NodeId::new(1, 99), + NodeId::new(1, 100), + NodeId::new(1, 101), + ]), + }, + ); + let response: RegisterNodesResponse = + supported_message_as!(response, RegisterNodesResponse); let registered_node_ids = response.registered_node_ids.unwrap(); // The middle node should be aliased assert_eq!(registered_node_ids[0], NodeId::new(1, 99)); @@ -698,12 +1139,19 @@ fn register_nodes() { fn unregister_nodes_nothing_to_do() { do_view_service_test(|server_state, session, _address_space, vs| { // Empty request - let response = vs.unregister_nodes(server_state, session, &UnregisterNodesRequest { - request_header: make_request_header(), - nodes_to_unregister: None, - }); + let response = vs.unregister_nodes( + server_state, + session, + &UnregisterNodesRequest { + request_header: make_request_header(), + nodes_to_unregister: None, + }, + ); let response: ServiceFault = supported_message_as!(response, ServiceFault); - assert_eq!(response.response_header.service_result, StatusCode::BadNothingToDo); + assert_eq!( + response.response_header.service_result, + StatusCode::BadNothingToDo + ); }); } @@ -714,22 +1162,29 @@ fn unregister_nodes() { { let mut server_state = trace_write_lock_unwrap!(server_state); server_state.set_register_nodes_callbacks( - Box::new(RegisterNodesImpl { session: Weak::new() }), + Box::new(RegisterNodesImpl { + session: Weak::new(), + }), Box::new(UnregisterNodesImpl {}), ); } // Not much to validate except that the function returns good - let response = vs.unregister_nodes(server_state, session, &UnregisterNodesRequest { - request_header: make_request_header(), - nodes_to_unregister: Some(vec![ - NodeId::new(1, 99), - ObjectId::ObjectsFolder.into(), - NodeId::new(1, 100), - NodeId::new(1, 101), - ]), - }); - let response: UnregisterNodesResponse = supported_message_as!(response, UnregisterNodesResponse); + let response = vs.unregister_nodes( + server_state, + session, + &UnregisterNodesRequest { + request_header: make_request_header(), + nodes_to_unregister: Some(vec![ + NodeId::new(1, 99), + ObjectId::ObjectsFolder.into(), + NodeId::new(1, 100), + NodeId::new(1, 101), + ]), + }, + ); + let response: UnregisterNodesResponse = + supported_message_as!(response, UnregisterNodesResponse); assert_eq!(response.response_header.service_result, StatusCode::Good); }); -} \ No newline at end of file +} diff --git a/server/src/tests/subscriptions/mod.rs b/server/src/tests/subscriptions/mod.rs index a1527a322..18e8c20cc 100644 --- a/server/src/tests/subscriptions/mod.rs +++ b/server/src/tests/subscriptions/mod.rs @@ -1,2 +1,2 @@ mod 
subscription; -mod subscriptions; \ No newline at end of file +mod subscriptions; diff --git a/server/src/tests/subscriptions/subscription.rs b/server/src/tests/subscriptions/subscription.rs index d96250f84..c92a5767a 100644 --- a/server/src/tests/subscriptions/subscription.rs +++ b/server/src/tests/subscriptions/subscription.rs @@ -1,8 +1,11 @@ use std::sync::{Arc, RwLock}; use crate::{ - subscriptions::subscription::{Subscription, SubscriptionState, SubscriptionStateParams, TickReason, HandledState, UpdateStateAction}, diagnostics::ServerDiagnostics, + subscriptions::subscription::{ + HandledState, Subscription, SubscriptionState, SubscriptionStateParams, TickReason, + UpdateStateAction, + }, }; const DEFAULT_LIFETIME_COUNT: u32 = 300; @@ -10,14 +13,30 @@ const DEFAULT_KEEPALIVE_COUNT: u32 = 100; fn make_subscription(state: SubscriptionState) -> Subscription { let subscription_interval = 1000f64; - let mut result = Subscription::new(Arc::new(RwLock::new(ServerDiagnostics::default())), 0, true, subscription_interval, DEFAULT_LIFETIME_COUNT, DEFAULT_KEEPALIVE_COUNT, 0); + let mut result = Subscription::new( + Arc::new(RwLock::new(ServerDiagnostics::default())), + 0, + true, + subscription_interval, + DEFAULT_LIFETIME_COUNT, + DEFAULT_KEEPALIVE_COUNT, + 0, + ); result.set_state(state); result } #[test] fn basic_subscription() { - let s = Subscription::new(Arc::new(RwLock::new(ServerDiagnostics::default())), 0, true, 1000f64, DEFAULT_LIFETIME_COUNT, DEFAULT_KEEPALIVE_COUNT, 0); + let s = Subscription::new( + Arc::new(RwLock::new(ServerDiagnostics::default())), + 0, + true, + 1000f64, + DEFAULT_LIFETIME_COUNT, + DEFAULT_KEEPALIVE_COUNT, + 0, + ); assert_eq!(s.state(), SubscriptionState::Creating); } @@ -40,7 +59,10 @@ fn update_state_3() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Create3); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::SubscriptionCreated); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::SubscriptionCreated + ); assert_eq!(s.state(), SubscriptionState::Normal); assert_eq!(s.message_sent(), false); } @@ -74,7 +96,10 @@ fn update_state_4() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Normal4); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), SubscriptionState::Normal); // TODO repeat with publishing enabled true, more notifications false @@ -108,7 +133,10 @@ fn update_state_5() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Normal5); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnNotifications); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::ReturnNotifications + ); assert_eq!(s.state(), SubscriptionState::Normal); assert_eq!(s.lifetime_counter(), s.max_lifetime_count()); assert_eq!(s.message_sent(), true); @@ -139,8 +167,14 @@ fn update_state_6() { let update_state_result = s.update_state(tick_reason, p); // ensure 6 - assert_eq!(update_state_result.handled_state, HandledState::IntervalElapsed6); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnNotifications); + assert_eq!( + update_state_result.handled_state, + HandledState::IntervalElapsed6 + ); + assert_eq!( + 
update_state_result.update_state_action, + UpdateStateAction::ReturnNotifications + ); assert_eq!(s.state(), SubscriptionState::Normal); assert_eq!(s.lifetime_counter(), 299); assert_eq!(s.message_sent(), true); @@ -168,8 +202,14 @@ fn update_state_7() { let update_state_result = s.update_state(tick_reason, p); - assert_eq!(update_state_result.handled_state, HandledState::IntervalElapsed7); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnKeepAlive); + assert_eq!( + update_state_result.handled_state, + HandledState::IntervalElapsed7 + ); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::ReturnKeepAlive + ); assert_eq!(s.state(), SubscriptionState::Normal); assert_eq!(s.lifetime_counter(), 299); assert_eq!(s.message_sent(), true); @@ -196,8 +236,14 @@ fn update_state_8() { let update_state_result = s.update_state(tick_reason, p); - assert_eq!(update_state_result.handled_state, HandledState::IntervalElapsed8); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.handled_state, + HandledState::IntervalElapsed8 + ); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), SubscriptionState::Late); // ensure start publishing timer } @@ -224,8 +270,14 @@ fn update_state_9() { let update_state_result = s.update_state(tick_reason, p); - assert_eq!(update_state_result.handled_state, HandledState::IntervalElapsed9); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.handled_state, + HandledState::IntervalElapsed9 + ); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), SubscriptionState::KeepAlive); assert_eq!(s.keep_alive_counter(), s.max_keep_alive_count()); } @@ -247,7 +299,10 @@ fn update_state_10() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Late10); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnNotifications); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::ReturnNotifications + ); assert_eq!(s.state(), SubscriptionState::Normal); assert_eq!(s.message_sent(), true); } @@ -269,7 +324,10 @@ fn update_state_11() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Late11); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnKeepAlive); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::ReturnKeepAlive + ); assert_eq!(s.state(), SubscriptionState::KeepAlive); assert_eq!(s.message_sent(), true); } @@ -291,7 +349,10 @@ fn update_state_12() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Late12); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), SubscriptionState::Late); } @@ -310,7 +371,10 @@ fn update_state_13() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::KeepAlive13); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), 
SubscriptionState::KeepAlive); } @@ -331,7 +395,10 @@ fn update_state_14() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::KeepAlive14); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnNotifications); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::ReturnNotifications + ); assert_eq!(s.state(), SubscriptionState::Normal); } @@ -353,7 +420,10 @@ fn update_state_15() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::KeepAlive15); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::ReturnKeepAlive); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::ReturnKeepAlive + ); assert_eq!(s.state(), SubscriptionState::KeepAlive); assert_eq!(s.keep_alive_counter(), s.max_keep_alive_count()); } @@ -376,7 +446,10 @@ fn update_state_16() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::KeepAlive16); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), SubscriptionState::KeepAlive); assert_eq!(s.keep_alive_counter(), 4); } @@ -398,7 +471,10 @@ fn update_state_17() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::KeepAlive17); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::None); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::None + ); assert_eq!(s.state(), SubscriptionState::Late); assert_eq!(s.keep_alive_counter(), 1); } @@ -430,7 +506,10 @@ fn update_state_27() { let update_state_result = s.update_state(tick_reason, p); assert_eq!(update_state_result.handled_state, HandledState::Closed27); - assert_eq!(update_state_result.update_state_action, UpdateStateAction::SubscriptionExpired); + assert_eq!( + update_state_result.update_state_action, + UpdateStateAction::SubscriptionExpired + ); assert_eq!(s.state(), SubscriptionState::Closed); assert_eq!(s.lifetime_counter(), 1); assert_eq!(s.message_sent(), false); diff --git a/server/src/tests/subscriptions/subscriptions.rs b/server/src/tests/subscriptions/subscriptions.rs index bf834aa85..524dce7e6 100644 --- a/server/src/tests/subscriptions/subscriptions.rs +++ b/server/src/tests/subscriptions/subscriptions.rs @@ -1,2 +1,2 @@ // Tests related to the Subscriptions struct go here, in particular relating to publish request -// and response handling. \ No newline at end of file +// and response handling. diff --git a/server/src/util/mod.rs b/server/src/util/mod.rs index c009c0cb3..ca6e316f8 100644 --- a/server/src/util/mod.rs +++ b/server/src/util/mod.rs @@ -4,11 +4,11 @@ //! Provides utility routines for things that might be used in a number of places elsewhere. 
-use std::time::{Instant, Duration}; use std::sync::{Arc, RwLock}; +use std::time::{Duration, Instant}; -use futures::Future; use futures::future; +use futures::Future; use futures::Stream; use tokio; @@ -23,8 +23,13 @@ use crate::state::ServerState; pub struct PollingAction {} impl PollingAction { - pub fn spawn(server_state: Arc>, interval_ms: u64, action: F) -> PollingAction - where F: 'static + Fn() + Send + pub fn spawn( + server_state: Arc>, + interval_ms: u64, + action: F, + ) -> PollingAction + where + F: 'static + Fn() + Send, { let server_state_take_while = server_state.clone(); let f = Interval::new(Instant::now(), Duration::from_millis(interval_ms)) @@ -33,14 +38,10 @@ impl PollingAction { let server_state = trace_read_lock_unwrap!(server_state_take_while); // If the server aborts or is in a failed state, this polling timer will stop let abort = match server_state.state() { - ServerStateType::Failed | - ServerStateType::NoConfiguration | - ServerStateType::Shutdown => { - true - } - _ => { - server_state.is_abort() - } + ServerStateType::Failed + | ServerStateType::NoConfiguration + | ServerStateType::Shutdown => true, + _ => server_state.is_abort(), }; if abort { debug!("Polling action is stopping due to server state / abort"); diff --git a/tools/certificate-creator/src/main.rs b/tools/certificate-creator/src/main.rs index 55e6550ae..46c56145b 100644 --- a/tools/certificate-creator/src/main.rs +++ b/tools/certificate-creator/src/main.rs @@ -8,7 +8,10 @@ fn main() { println!(" Key size = {}", x509_data.key_size); println!(" CN (common name) = \"{}\"", x509_data.common_name); println!(" O (organization) = \"{}\"", x509_data.organization); - println!(" OU (organizational unit) = \"{}\"", x509_data.organizational_unit); + println!( + " OU (organizational unit) = \"{}\"", + x509_data.organizational_unit + ); println!(" C (country) = \"{}\"", x509_data.country); println!(" ST (state) = \"{}\"", x509_data.state); println!(" Duration = {} days", x509_data.certificate_duration_days); @@ -21,11 +24,17 @@ fn main() { } let cert_store = CertificateStore::new(&path); - if cert_store.create_and_store_application_instance_cert(&x509_data, overwrite).is_err() { + if cert_store + .create_and_store_application_instance_cert(&x509_data, overwrite) + .is_err() + { eprintln!("Certificate creation failed, check above for errors"); } else { - println!("Certificate and private key have been written to {} and {}", - cert_store.own_certificate_path().display(), cert_store.own_private_key_path().display()); + println!( + "Certificate and private key have been written to {} and {}", + cert_store.own_certificate_path().display(), + cert_store.own_private_key_path().display() + ); } } } @@ -53,23 +62,44 @@ impl Args { Ok(Args { help: args.contains(["-h", "--help"]), overwrite: args.contains(["-o", "--overwrite"]), - key_size: args.opt_value_from_str("--key-size")?.unwrap_or(DEFAULT_KEY_SIZE), - pki_path: args.opt_value_from_str("--pkipath")?.unwrap_or(String::from(DEFAULT_PKI_PATH)), - duration: args.opt_value_from_str("--duration")?.unwrap_or(DEFAULT_DURATION), - application_uri: args.opt_value_from_str("--application-uri")?.unwrap_or(String::from(DEFAULT_APPLICATION_URI)), - hostnames: args.opt_value_from_str("--hostnames")?.unwrap_or(String::from("")), + key_size: args + .opt_value_from_str("--key-size")? + .unwrap_or(DEFAULT_KEY_SIZE), + pki_path: args + .opt_value_from_str("--pkipath")? + .unwrap_or(String::from(DEFAULT_PKI_PATH)), + duration: args + .opt_value_from_str("--duration")? 
+ .unwrap_or(DEFAULT_DURATION), + application_uri: args + .opt_value_from_str("--application-uri")? + .unwrap_or(String::from(DEFAULT_APPLICATION_URI)), + hostnames: args + .opt_value_from_str("--hostnames")? + .unwrap_or(String::from("")), add_computer_name: args.contains("--add-computer-name"), add_localhost_name: args.contains("--add-localhost-name"), - common_name: args.opt_value_from_str("--CN")?.unwrap_or(String::from(DEFAULT_CN)), - organization: args.opt_value_from_str("--O")?.unwrap_or(String::from(DEFAULT_O)), - organizational_unit: args.opt_value_from_str("--OU")?.unwrap_or(String::from(DEFAULT_OU)), - country: args.opt_value_from_str("--C")?.unwrap_or(String::from(DEFAULT_C)), - state: args.opt_value_from_str("--ST")?.unwrap_or(String::from(DEFAULT_ST)), + common_name: args + .opt_value_from_str("--CN")? + .unwrap_or(String::from(DEFAULT_CN)), + organization: args + .opt_value_from_str("--O")? + .unwrap_or(String::from(DEFAULT_O)), + organizational_unit: args + .opt_value_from_str("--OU")? + .unwrap_or(String::from(DEFAULT_OU)), + country: args + .opt_value_from_str("--C")? + .unwrap_or(String::from(DEFAULT_C)), + state: args + .opt_value_from_str("--ST")? + .unwrap_or(String::from(DEFAULT_ST)), }) } pub fn usage() { - println!(r#"OPC UA Certificate Creator + println!( + r#"OPC UA Certificate Creator This creates a self-signed key (private/private.pem) and X509 certificate (own/cert.der) for use with OPC UA clients and servers. Use the flags to control what the certificate contains. For @@ -91,7 +121,16 @@ Usage: --OU name Specifies the Organization Unit for the cert (default: {}). --C name Specifies the Country for the cert (default: {}). --ST name "Specifies the State for the cert. (default: {})"#, - DEFAULT_KEY_SIZE, DEFAULT_PKI_PATH, DEFAULT_DURATION, DEFAULT_APPLICATION_URI, DEFAULT_CN, DEFAULT_O, DEFAULT_OU, DEFAULT_C, DEFAULT_ST); + DEFAULT_KEY_SIZE, + DEFAULT_PKI_PATH, + DEFAULT_DURATION, + DEFAULT_APPLICATION_URI, + DEFAULT_CN, + DEFAULT_O, + DEFAULT_OU, + DEFAULT_C, + DEFAULT_ST + ); } } @@ -107,8 +146,7 @@ const DEFAULT_ST: &'static str = "Dublin"; fn parse_x509_args() -> Result<(X509Data, bool, PathBuf), ()> { // Read command line arguments - let args = Args::parse_args() - .map_err(|_| Args::usage())?; + let args = Args::parse_args().map_err(|_| Args::usage())?; if args.help || ![2048u16, 4096u16].contains(&args.key_size) || args.duration == 0 { Args::usage(); Err(()) @@ -129,7 +167,12 @@ fn parse_x509_args() -> Result<(X509Data, bool, PathBuf), ()> { // Create alt host names for application uri, localhost and computer name if required let hostnames: Vec = args.hostnames.split(",").map(|s| s.to_string()).collect(); - let alt_host_names = X509Data::alt_host_names(&application_uri, Some(hostnames), add_localhost, add_computer_name); + let alt_host_names = X509Data::alt_host_names( + &application_uri, + Some(hostnames), + add_localhost, + add_computer_name, + ); // Add the host names that were supplied by argument if alt_host_names.len() == 1 { @@ -137,15 +180,19 @@ fn parse_x509_args() -> Result<(X509Data, bool, PathBuf), ()> { return Err(()); } - Ok((X509Data { - key_size, - common_name, - organization, - organizational_unit, - country, - state, - alt_host_names, - certificate_duration_days, - }, overwrite, PathBuf::from(&pki_path))) + Ok(( + X509Data { + key_size, + common_name, + organization, + organizational_unit, + country, + state, + alt_host_names, + certificate_duration_days, + }, + overwrite, + PathBuf::from(&pki_path), + )) } } diff --git 
a/types/src/argument.rs b/types/src/argument.rs index cacb2ab46..13755dff9 100644 --- a/types/src/argument.rs +++ b/types/src/argument.rs @@ -5,10 +5,7 @@ use std::io::{Read, Write}; use crate::{ - encoding::*, - localized_text::LocalizedText, - node_id::NodeId, - status_codes::StatusCode, + encoding::*, localized_text::LocalizedText, node_id::NodeId, status_codes::StatusCode, string::UAString, }; @@ -84,4 +81,4 @@ impl BinaryEncoder for Argument { description, }) } -} \ No newline at end of file +} diff --git a/types/src/array.rs b/types/src/array.rs index a2da7a135..853745fa8 100644 --- a/types/src/array.rs +++ b/types/src/array.rs @@ -1,6 +1,4 @@ -use crate::{ - variant::*, -}; +use crate::variant::*; pub(crate) const ARRAY_DIMENSIONS_BIT: u8 = 1 << 6; pub(crate) const ARRAY_VALUES_BIT: u8 = 1 << 7; @@ -21,7 +19,10 @@ pub struct Array { } impl Array { - pub fn new_single(values: V) -> Array where V: Into> { + pub fn new_single(values: V) -> Array + where + V: Into>, + { Array { values: values.into(), dimensions: Vec::new(), @@ -29,7 +30,10 @@ impl Array { } pub fn new_multi(values: V, dimensions: D) -> Array - where V: Into>, D: Into> { + where + V: Into>, + D: Into>, + { Array { values: values.into(), dimensions: dimensions.into(), @@ -96,8 +100,10 @@ pub fn values_are_of_type(values: &[Variant], expected_type: VariantTypeId) -> b // Ensure all remaining elements are the same type as the first element let found_unexpected = values.iter().any(|v| v.type_id() != expected_type); if found_unexpected { - error!("Variant array's type is expected to be {:?} but found other types in it", expected_type); + error!( + "Variant array's type is expected to be {:?} but found other types in it", + expected_type + ); }; !found_unexpected } - diff --git a/types/src/byte_string.rs b/types/src/byte_string.rs index f0fa51bbc..bc6f54466 100644 --- a/types/src/byte_string.rs +++ b/types/src/byte_string.rs @@ -10,9 +10,12 @@ use std::io::{Read, Write}; use base64; use crate::{ - encoding::{BinaryEncoder, DecodingLimits, EncodingResult, process_decode_io_result, process_encode_io_result, write_i32}, - Guid, + encoding::{ + process_decode_io_result, process_encode_io_result, write_i32, BinaryEncoder, + DecodingLimits, EncodingResult, + }, status_codes::StatusCode, + Guid, }; /// A sequence of octets. 
@@ -23,14 +26,22 @@ pub struct ByteString { impl AsRef<[u8]> for ByteString { fn as_ref(&self) -> &[u8] { - if self.value.is_none() { &[] } else { self.value.as_ref().unwrap() } + if self.value.is_none() { + &[] + } else { + self.value.as_ref().unwrap() + } } } impl BinaryEncoder for ByteString { fn byte_len(&self) -> usize { // Length plus the actual length of bytes (if not null) - 4 + if self.value.is_none() { 0 } else { self.value.as_ref().unwrap().len() } + 4 + if self.value.is_none() { + 0 + } else { + self.value.as_ref().unwrap().len() + } } fn encode(&self, stream: &mut S) -> EncodingResult { @@ -56,20 +67,24 @@ impl BinaryEncoder for ByteString { error!("ByteString buf length is a negative number {}", len); Err(StatusCode::BadDecodingError) } else if len as usize > decoding_limits.max_byte_string_length { - error!("ByteString length {} exceeds decoding limit {}", len, decoding_limits.max_string_length); + error!( + "ByteString length {} exceeds decoding limit {}", + len, decoding_limits.max_string_length + ); Err(StatusCode::BadDecodingError) } else { // Create a buffer filled with zeroes and read the byte string over the top let mut buf: Vec = vec![0u8; len as usize]; process_decode_io_result(stream.read_exact(&mut buf))?; - Ok(ByteString { - value: Some(buf) - }) + Ok(ByteString { value: Some(buf) }) } } } -impl<'a, T> From<&'a T> for ByteString where T: AsRef<[u8]> + ?Sized { +impl<'a, T> From<&'a T> for ByteString +where + T: AsRef<[u8]> + ?Sized, +{ fn from(value: &'a T) -> Self { Self::from(value.as_ref().to_vec()) } @@ -223,4 +238,4 @@ fn bytestring_substring() { assert!(v.substring(4, 10000).is_err()); assert!(ByteString::null().substring(0, 0).is_err()); -} \ No newline at end of file +} diff --git a/types/src/data_value.rs b/types/src/data_value.rs index e9ba113ea..5898a438d 100644 --- a/types/src/data_value.rs +++ b/types/src/data_value.rs @@ -7,17 +7,9 @@ use std::io::{Read, Write}; use crate::{ - byte_string::ByteString, - date_time::*, - encoding::*, - guid::Guid, - localized_text::LocalizedText, - node_id::NodeId, - qualified_name::QualifiedName, - service_types::TimestampsToReturn, - status_codes::StatusCode, - string::UAString, - variant::Variant, + byte_string::ByteString, date_time::*, encoding::*, guid::Guid, localized_text::LocalizedText, + node_id::NodeId, qualified_name::QualifiedName, service_types::TimestampsToReturn, + status_codes::StatusCode, string::UAString, variant::Variant, }; bitflags! { @@ -116,7 +108,8 @@ impl BinaryEncoder for DataValue { } fn decode(stream: &mut S, decoding_limits: &DecodingLimits) -> EncodingResult { - let encoding_mask = DataValueFlags::from_bits_truncate(u8::decode(stream, decoding_limits)?); + let encoding_mask = + DataValueFlags::from_bits_truncate(u8::decode(stream, decoding_limits)?); // Value let value = if encoding_mask.contains(DataValueFlags::HAS_VALUE) { @@ -157,100 +150,149 @@ impl BinaryEncoder for DataValue { Ok(DataValue { value, status, - source_picoseconds: if source_timestamp.is_some() { source_picoseconds } else { None }, + source_picoseconds: if source_timestamp.is_some() { + source_picoseconds + } else { + None + }, source_timestamp, - server_picoseconds: if server_timestamp.is_some() { server_picoseconds } else { None }, + server_picoseconds: if server_timestamp.is_some() { + server_picoseconds + } else { + None + }, server_timestamp, }) } } - // It would be nice if everything from here to the ... 
below could be condensed into a single // trait impl somehow because it's more or less duplicating all the code in Variant. impl From<bool> for DataValue { - fn from(v: bool) -> Self { Self::from(Variant::from(v)) } + fn from(v: bool) -> Self { + Self::from(Variant::from(v)) + } } impl From<u8> for DataValue { - fn from(v: u8) -> Self { Self::from(Variant::from(v)) } + fn from(v: u8) -> Self { + Self::from(Variant::from(v)) + } } impl From<i8> for DataValue { - fn from(v: i8) -> Self { Self::from(Variant::from(v)) } + fn from(v: i8) -> Self { + Self::from(Variant::from(v)) + } } impl From<i16> for DataValue { - fn from(v: i16) -> Self { Self::from(Variant::from(v)) } + fn from(v: i16) -> Self { + Self::from(Variant::from(v)) + } } impl From<u16> for DataValue { - fn from(v: u16) -> Self { Self::from(Variant::from(v)) } + fn from(v: u16) -> Self { + Self::from(Variant::from(v)) + } } impl From<i32> for DataValue { - fn from(v: i32) -> Self { Self::from(Variant::from(v)) } + fn from(v: i32) -> Self { + Self::from(Variant::from(v)) + } } impl From<u32> for DataValue { - fn from(v: u32) -> Self { Self::from(Variant::from(v)) } + fn from(v: u32) -> Self { + Self::from(Variant::from(v)) + } } impl From<i64> for DataValue { - fn from(v: i64) -> Self { Self::from(Variant::from(v)) } + fn from(v: i64) -> Self { + Self::from(Variant::from(v)) + } } impl From<u64> for DataValue { - fn from(v: u64) -> Self { Self::from(Variant::from(v)) } + fn from(v: u64) -> Self { + Self::from(Variant::from(v)) + } } impl From<f32> for DataValue { - fn from(v: f32) -> Self { Self::from(Variant::from(v)) } + fn from(v: f32) -> Self { + Self::from(Variant::from(v)) + } } impl From<f64> for DataValue { - fn from(v: f64) -> Self { Self::from(Variant::from(v)) } + fn from(v: f64) -> Self { + Self::from(Variant::from(v)) + } } impl<'a> From<&'a str> for DataValue { - fn from(v: &'a str) -> Self { Self::from(Variant::from(v)) } + fn from(v: &'a str) -> Self { + Self::from(Variant::from(v)) + } } impl From<String> for DataValue { - fn from(v: String) -> Self { Self::from(Variant::from(v)) } + fn from(v: String) -> Self { + Self::from(Variant::from(v)) + } } impl From<UAString> for DataValue { - fn from(v: UAString) -> Self { Self::from(Variant::from(v)) } + fn from(v: UAString) -> Self { + Self::from(Variant::from(v)) + } } impl From<DateTime> for DataValue { - fn from(v: DateTime) -> Self { Self::from(Variant::from(v)) } + fn from(v: DateTime) -> Self { + Self::from(Variant::from(v)) + } } impl From<Guid> for DataValue { - fn from(v: Guid) -> Self { Self::from(Variant::from(v)) } + fn from(v: Guid) -> Self { + Self::from(Variant::from(v)) + } } impl From<StatusCode> for DataValue { - fn from(v: StatusCode) -> Self { Self::from(Variant::from(v)) } + fn from(v: StatusCode) -> Self { + Self::from(Variant::from(v)) + } } impl From<ByteString> for DataValue { - fn from(v: ByteString) -> Self { Self::from(Variant::from(v)) } + fn from(v: ByteString) -> Self { + Self::from(Variant::from(v)) + } } impl From<QualifiedName> for DataValue { - fn from(v: QualifiedName) -> Self { Self::from(Variant::from(v)) } + fn from(v: QualifiedName) -> Self { + Self::from(Variant::from(v)) + } } impl From<LocalizedText> for DataValue { - fn from(v: LocalizedText) -> Self { Self::from(Variant::from(v)) } + fn from(v: LocalizedText) -> Self { + Self::from(Variant::from(v)) + } } impl From<NodeId> for DataValue { - fn from(v: NodeId) -> Self { Self::from(Variant::from(v)) } + fn from(v: NodeId) -> Self { + Self::from(Variant::from(v)) + } } //... (see above) @@ -310,7 +352,10 @@ impl Default for DataValue { impl DataValue { /// Creates a `DataValue` from the supplied value with nothing else. 
- pub fn value_only(value: V) -> DataValue where V: Into { + pub fn value_only(value: V) -> DataValue + where + V: Into, + { DataValue { value: Some(value.into()), status: None, @@ -328,7 +373,10 @@ impl DataValue { /// The Server returns a Bad_WriteNotSupported error if it does not support writing of timestamps_ /// /// In which case, use the `value_only()` constructor, or make explicit which fields you pass. - pub fn new_now(value: V) -> DataValue where V: Into { + pub fn new_now(value: V) -> DataValue + where + V: Into, + { let now = DateTime::now(); DataValue { value: Some(value.into()), @@ -353,7 +401,14 @@ impl DataValue { } /// Sets the value of the data value, updating the timestamps at the same point - pub fn set_value(&mut self, value: V, source_timestamp: &DateTime, server_timestamp: &DateTime) where V: Into { + pub fn set_value( + &mut self, + value: V, + source_timestamp: &DateTime, + server_timestamp: &DateTime, + ) where + V: Into, + { self.value = Some(value.into()); self.source_timestamp = Some(source_timestamp.clone()); self.source_picoseconds = Some(0); @@ -362,7 +417,12 @@ impl DataValue { } /// Sets the timestamps of the data value based on supplied timestamps to return - pub fn set_timestamps(&mut self, timestamps_to_return: TimestampsToReturn, source_timestamp: DateTime, server_timestamp: DateTime) { + pub fn set_timestamps( + &mut self, + timestamps_to_return: TimestampsToReturn, + source_timestamp: DateTime, + server_timestamp: DateTime, + ) { match timestamps_to_return { TimestampsToReturn::Source => { self.source_timestamp = Some(source_timestamp); @@ -425,4 +485,4 @@ impl DataValue { } encoding_mask } -} \ No newline at end of file +} diff --git a/types/src/date_time.rs b/types/src/date_time.rs index 3264f3bdd..2fa106a62 100644 --- a/types/src/date_time.rs +++ b/types/src/date_time.rs @@ -10,7 +10,7 @@ use std::{ str::FromStr, }; -use chrono::{self, Datelike, Timelike, TimeZone, Utc}; +use chrono::{self, Datelike, TimeZone, Timelike, Utc}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use crate::encoding::*; @@ -32,14 +32,19 @@ pub struct DateTime { } impl Serialize for DateTime { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { let ticks = self.checked_ticks(); ticks.serialize(serializer) } } impl<'de> Deserialize<'de> for DateTime { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, { let ticks = i64::deserialize(deserializer)?; Ok(DateTime::from(ticks)) @@ -99,8 +104,12 @@ impl From<(u16, u16, u16, u16, u16, u16, u32)> for DateTime { if nanos as i64 >= NANOS_PER_SECOND { panic!("Invalid nanosecond"); } - let dt = Utc.ymd(year as i32, month as u32, day as u32) - .and_hms_nano(hour as u32, minute as u32, second as u32, nanos); + let dt = Utc.ymd(year as i32, month as u32, day as u32).and_hms_nano( + hour as u32, + minute as u32, + second as u32, + nanos, + ); DateTime::from(dt) } } @@ -115,7 +124,8 @@ impl From for DateTime { let minute = date_time.minute(); let second = date_time.second(); let nanos = (date_time.nanosecond() / NANOS_PER_TICK as u32) * NANOS_PER_TICK as u32; - let date_time = Utc.ymd(year, month, day) + let date_time = Utc + .ymd(year, month, day) .and_hms_nano(hour, minute, second, nanos); DateTime { date_time } } @@ -157,11 +167,11 @@ impl FromStr for DateTime { type Err = (); fn from_str(s: &str) -> Result { - 
DateTimeUtc::from_str(s).map(|d| { - DateTime::from(d) - }).map_err(|e| { - error!("Cannot parse date {}, error = {}", s, e); - }) + DateTimeUtc::from_str(s) + .map(|d| DateTime::from(d)) + .map_err(|e| { + error!("Cannot parse date {}, error = {}", s, e); + }) } } @@ -203,24 +213,27 @@ impl DateTime { } /// Constructs from a year, month, day, hour, minute, second - pub fn ymd_hms(year: u16, - month: u16, - day: u16, - hour: u16, - minute: u16, - second: u16) - -> DateTime { + pub fn ymd_hms( + year: u16, + month: u16, + day: u16, + hour: u16, + minute: u16, + second: u16, + ) -> DateTime { DateTime::from((year, month, day, hour, minute, second)) } /// Constructs from a year, month, day, hour, minute, second, nanosecond - pub fn ymd_hms_nano(year: u16, - month: u16, - day: u16, - hour: u16, - minute: u16, - second: u16, - nanos: u32) -> DateTime { + pub fn ymd_hms_nano( + year: u16, + month: u16, + day: u16, + hour: u16, + minute: u16, + second: u16, + nanos: u32, + ) -> DateTime { DateTime::from((year, month, day, hour, minute, second, nanos)) } diff --git a/types/src/diagnostic_info.rs b/types/src/diagnostic_info.rs index 9fc851d74..7b5a8cbca 100644 --- a/types/src/diagnostic_info.rs +++ b/types/src/diagnostic_info.rs @@ -6,11 +6,7 @@ use std::io::{Read, Write}; -use crate::{ - encoding::*, - status_codes::StatusCode, - string::UAString, -}; +use crate::{encoding::*, status_codes::StatusCode, string::UAString}; bitflags! { pub struct DiagnosticInfoMask: u8 { @@ -138,7 +134,8 @@ impl BinaryEncoder for DiagnosticInfo { } fn decode(stream: &mut S, decoding_limits: &DecodingLimits) -> EncodingResult { - let encoding_mask = DiagnosticInfoMask::from_bits_truncate(u8::decode(stream, decoding_limits)?); + let encoding_mask = + DiagnosticInfoMask::from_bits_truncate(u8::decode(stream, decoding_limits)?); let mut diagnostic_info = DiagnosticInfo::default(); if encoding_mask.contains(DiagnosticInfoMask::HAS_SYMBOLIC_ID) { @@ -167,7 +164,8 @@ impl BinaryEncoder for DiagnosticInfo { } if encoding_mask.contains(DiagnosticInfoMask::HAS_INNER_DIAGNOSTIC_INFO) { // Read inner diagnostic info - diagnostic_info.inner_diagnostic_info = Some(Box::new(DiagnosticInfo::decode(stream, decoding_limits)?)); + diagnostic_info.inner_diagnostic_info = + Some(Box::new(DiagnosticInfo::decode(stream, decoding_limits)?)); } Ok(diagnostic_info) } diff --git a/types/src/encoding.rs b/types/src/encoding.rs index d01082a0a..e1b7251f8 100644 --- a/types/src/encoding.rs +++ b/types/src/encoding.rs @@ -13,10 +13,7 @@ use std::{ use byteorder::{ByteOrder, LittleEndian, WriteBytesExt}; -use crate::{ - constants, - status_codes::StatusCode, -}; +use crate::{constants, status_codes::StatusCode}; pub type EncodingResult = std::result::Result; @@ -88,7 +85,10 @@ pub fn process_encode_io_result(result: Result) -> EncodingResult } /// Converts an IO encoding error (and logs when in error) into an EncodingResult -pub fn process_decode_io_result(result: Result) -> EncodingResult where T: Debug { +pub fn process_decode_io_result(result: Result) -> EncodingResult +where + T: Debug, +{ result.map_err(|err| { trace!("Decoding error - {:?}", err); StatusCode::BadDecodingError @@ -105,7 +105,10 @@ pub fn byte_len_array>(values: &Option>) -> usize { } /// Write an array of the encoded type to stream, preserving distinction between null array and empty array -pub fn write_array>(stream: &mut S, values: &Option>) -> EncodingResult { +pub fn write_array>( + stream: &mut S, + values: &Option>, +) -> EncodingResult { let mut size = 0; if let 
Some(ref values) = values { size += write_i32(stream, values.len() as i32)?; @@ -119,7 +122,10 @@ pub fn write_array>(stream: &mut S, values: &Optio } /// Reads an array of the encoded type from a stream, preserving distinction between null array and empty array -pub fn read_array>(stream: &mut S, decoding_limits: &DecodingLimits) -> EncodingResult>> { +pub fn read_array>( + stream: &mut S, + decoding_limits: &DecodingLimits, +) -> EncodingResult>> { let len = read_i32(stream)?; if len == -1 { Ok(None) @@ -127,7 +133,10 @@ pub fn read_array>(stream: &mut S, decoding_limits: error!("Array length is negative value and invalid"); Err(StatusCode::BadDecodingError) } else if len as usize > decoding_limits.max_array_length { - error!("Array length {} exceeds decoding limit {}", len, decoding_limits.max_array_length); + error!( + "Array length {} exceeds decoding limit {}", + len, decoding_limits.max_array_length + ); Err(StatusCode::BadDecodingError) } else { let mut values: Vec = Vec::with_capacity(len as usize); @@ -141,69 +150,97 @@ pub fn read_array>(stream: &mut S, decoding_limits: /// Writes a series of identical bytes to the stream pub fn write_bytes(stream: &mut dyn Write, value: u8, count: usize) -> EncodingResult { for _ in 0..count { - let _ = stream.write_u8(value) + let _ = stream + .write_u8(value) .map_err(|_| StatusCode::BadEncodingError)?; } Ok(count) } /// Writes an unsigned byte to the stream -pub fn write_u8(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_u8(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let buf: [u8; 1] = [value.into()]; process_encode_io_result(stream.write(&buf)) } /// Writes a signed 16-bit value to the stream -pub fn write_i16(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_i16(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 2]; LittleEndian::write_i16(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes an unsigned 16-bit value to the stream -pub fn write_u16(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_u16(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 2]; LittleEndian::write_u16(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes a signed 32-bit value to the stream -pub fn write_i32(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_i32(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 4]; LittleEndian::write_i32(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes an unsigned 32-bit value to the stream -pub fn write_u32(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_u32(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 4]; LittleEndian::write_u32(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes a signed 64-bit value to the stream -pub fn write_i64(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_i64(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 8]; LittleEndian::write_i64(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes an unsigned 64-bit value to the stream -pub fn write_u64(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub 
fn write_u64(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 8]; LittleEndian::write_u64(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes a 32-bit precision value to the stream -pub fn write_f32(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_f32(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 4]; LittleEndian::write_f32(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) } /// Writes a 64-bit precision value to the stream -pub fn write_f64(stream: &mut dyn Write, value: T) -> EncodingResult where T: Into { +pub fn write_f64(stream: &mut dyn Write, value: T) -> EncodingResult +where + T: Into, +{ let mut buf = [0u8; 8]; LittleEndian::write_f64(&mut buf, value.into()); process_encode_io_result(stream.write(&buf)) diff --git a/types/src/extension_object.rs b/types/src/extension_object.rs index 1bae78f65..8fa847ba7 100644 --- a/types/src/extension_object.rs +++ b/types/src/extension_object.rs @@ -7,12 +7,8 @@ use std::io::{Cursor, Read, Write}; use crate::{ - byte_string::ByteString, - encoding::*, - node_id::NodeId, - node_ids::ObjectId, - status_codes::StatusCode, - string::XmlElement, + byte_string::ByteString, encoding::*, node_id::NodeId, node_ids::ObjectId, + status_codes::StatusCode, string::XmlElement, }; /// Enumeration that holds the kinds of encoding that an ExtensionObject data may be encoded with. @@ -76,9 +72,7 @@ impl BinaryEncoder for ExtensionObject { let node_id = NodeId::decode(stream, decoding_limits)?; let encoding_type = u8::decode(stream, decoding_limits)?; let body = match encoding_type { - 0x0 => { - ExtensionObjectEncoding::None - } + 0x0 => ExtensionObjectEncoding::None, 0x1 => { ExtensionObjectEncoding::ByteString(ByteString::decode(stream, decoding_limits)?) } @@ -90,10 +84,7 @@ impl BinaryEncoder for ExtensionObject { return Err(StatusCode::BadDecodingError); } }; - Ok(ExtensionObject { - node_id, - body, - }) + Ok(ExtensionObject { node_id, body }) } } @@ -113,10 +104,11 @@ impl ExtensionObject { /// Tests for empty body. pub fn is_empty(&self) -> bool { - self.is_null() || match self.body { - ExtensionObjectEncoding::None => true, - _ => false - } + self.is_null() + || match self.body { + ExtensionObjectEncoding::None => true, + _ => false, + } } /// Returns the object id of the thing this extension object contains, assuming the @@ -127,8 +119,11 @@ impl ExtensionObject { /// Creates an extension object with the specified node id and the encodable object as its payload. /// The body is set to a byte string containing the encoded struct. - pub fn from_encodable(node_id: N, encodable: &T) -> ExtensionObject where N: Into, - T: BinaryEncoder { + pub fn from_encodable(node_id: N, encodable: &T) -> ExtensionObject + where + N: Into, + T: BinaryEncoder, + { // Serialize to extension object let mut stream = Cursor::new(vec![0u8; encodable.byte_len()]); let _ = encodable.encode(&mut stream); @@ -141,7 +136,10 @@ impl ExtensionObject { /// Decodes the inner content of the extension object and returns it. The node id is ignored /// for decoding. The caller supplies the binary encoder impl that should be used to extract /// the data. Errors result in a decoding error. 
- pub fn decode_inner(&self, decoding_limits: &DecodingLimits) -> EncodingResult where T: BinaryEncoder { + pub fn decode_inner(&self, decoding_limits: &DecodingLimits) -> EncodingResult + where + T: BinaryEncoder, + { match self.body { ExtensionObjectEncoding::ByteString(ref byte_string) => { if let Some(ref value) = byte_string.value { diff --git a/types/src/guid.rs b/types/src/guid.rs index 7696fd8ca..195d4bf3d 100644 --- a/types/src/guid.rs +++ b/types/src/guid.rs @@ -21,26 +21,27 @@ pub struct Guid { } impl Serialize for Guid { - fn serialize(&self, serializer: S) -> Result where S: Serializer { + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { self.uuid.to_string().serialize(serializer) } } impl<'de> Deserialize<'de> for Guid { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, { use serde::de::Error; let result = String::deserialize(deserializer); match result { - Ok(uuid) => { - Uuid::parse_str(&uuid) - .map(|uuid| Guid { uuid }) - .map_err(|_| D::Error::custom("Invalid uuid")) - } - Err(err) => { - Err(err) - } + Ok(uuid) => Uuid::parse_str(&uuid) + .map(|uuid| Guid { uuid }) + .map_err(|_| D::Error::custom("Invalid uuid")), + Err(err) => Err(err), } } } @@ -71,7 +72,9 @@ impl BinaryEncoder for Guid { fn decode(stream: &mut S, _: &DecodingLimits) -> EncodingResult { let mut bytes = [0u8; 16]; process_decode_io_result(stream.read_exact(&mut bytes))?; - Ok(Guid { uuid: Uuid::from_bytes(bytes) }) + Ok(Guid { + uuid: Uuid::from_bytes(bytes), + }) } } @@ -79,9 +82,7 @@ impl FromStr for Guid { type Err = (); fn from_str(s: &str) -> Result { - Uuid::from_str(s).map(|uuid| { - Guid { uuid } - }).map_err(|err| { + Uuid::from_str(s).map(|uuid| Guid { uuid }).map_err(|err| { error!("Guid cannot be parsed from string, err = {:?}", err); }) } @@ -101,7 +102,9 @@ impl Guid { /// Creates a random Guid pub fn new() -> Guid { - Guid { uuid: Uuid::new_v4() } + Guid { + uuid: Uuid::new_v4(), + } } /// Returns the bytes of the Guid @@ -111,6 +114,8 @@ impl Guid { // Creates a guid from bytes pub fn from_bytes(bytes: [u8; 16]) -> Guid { - Guid { uuid: Uuid::from_bytes(bytes) } + Guid { + uuid: Uuid::from_bytes(bytes), + } } } diff --git a/types/src/lib.rs b/types/src/lib.rs index 994ae28cd..cf836342b 100644 --- a/types/src/lib.rs +++ b/types/src/lib.rs @@ -26,10 +26,13 @@ extern crate serde_json; ///Contains constants recognized by OPC UA clients and servers to describe various protocols and /// profiles used during communication and encryption. pub mod profiles { - pub const TRANSPORT_PROFILE_URI_BINARY: &str = "http://opcfoundation.org/UA-Profile/Transport/uatcp-uasc-uabinary"; + pub const TRANSPORT_PROFILE_URI_BINARY: &str = + "http://opcfoundation.org/UA-Profile/Transport/uatcp-uasc-uabinary"; - pub const SECURITY_USER_TOKEN_POLICY_ANONYMOUS: &str = "http://opcfoundation.org/UA-Profile/Security/UserToken/Anonymous"; - pub const SECURITY_USER_TOKEN_POLICY_USERPASS: &str = "http://opcfoundation.org/UA-Profile/ Security/UserToken-Server/UserNamePassword"; + pub const SECURITY_USER_TOKEN_POLICY_ANONYMOUS: &str = + "http://opcfoundation.org/UA-Profile/Security/UserToken/Anonymous"; + pub const SECURITY_USER_TOKEN_POLICY_USERPASS: &str = + "http://opcfoundation.org/UA-Profile/ Security/UserToken-Server/UserNamePassword"; } pub mod constants { @@ -188,57 +191,38 @@ bitflags! 
{ mod status_codes; -pub mod encoding; +pub mod argument; +pub mod array; +pub mod attribute; pub mod basic_types; -pub mod string; -pub mod qualified_name; -pub mod localized_text; -pub mod extension_object; pub mod byte_string; +pub mod data_types; pub mod data_value; pub mod date_time; pub mod diagnostic_info; +pub mod encoding; +pub mod extension_object; pub mod guid; +pub mod localized_text; pub mod node_id; pub mod node_ids; -pub mod variant; -pub mod array; -pub mod data_types; pub mod notification_message; -pub mod attribute; pub mod numeric_range; -pub mod argument; -pub mod service_types; -pub mod status_code; -pub mod relative_path; pub mod operand; +pub mod qualified_name; +pub mod relative_path; pub mod request_header; pub mod response_header; +pub mod service_types; +pub mod status_code; +pub mod string; +pub mod variant; pub use crate::{ - encoding::*, - basic_types::*, - localized_text::*, - qualified_name::*, - string::*, - extension_object::*, - byte_string::*, - data_value::*, - diagnostic_info::*, - date_time::*, - guid::*, - node_id::*, - node_ids::*, - variant::*, - array::*, - data_types::*, - attribute::*, - service_types::*, - numeric_range::*, - argument::*, - operand::*, - request_header::*, - response_header::*, + argument::*, array::*, attribute::*, basic_types::*, byte_string::*, data_types::*, + data_value::*, date_time::*, diagnostic_info::*, encoding::*, extension_object::*, guid::*, + localized_text::*, node_id::*, node_ids::*, numeric_range::*, operand::*, qualified_name::*, + request_header::*, response_header::*, service_types::*, string::*, variant::*, }; #[cfg(test)] diff --git a/types/src/localized_text.rs b/types/src/localized_text.rs index 973a50c3f..268f7d402 100644 --- a/types/src/localized_text.rs +++ b/types/src/localized_text.rs @@ -99,10 +99,7 @@ impl BinaryEncoder for LocalizedText { } else { UAString::null() }; - Ok(LocalizedText { - locale, - text, - }) + Ok(LocalizedText { locale, text }) } } diff --git a/types/src/node_id.rs b/types/src/node_id.rs index 2aa78f39a..2d6ff2802 100644 --- a/types/src/node_id.rs +++ b/types/src/node_id.rs @@ -5,11 +5,13 @@ //! Contains the implementation of `NodeId` and `ExpandedNodeId`. use std::{ - self, convert::TryFrom, fmt, + self, + convert::TryFrom, + fmt, io::{Read, Write}, str::FromStr, - sync::atomic::{AtomicUsize, Ordering}, u16, - u32, + sync::atomic::{AtomicUsize, Ordering}, + u16, u32, }; use crate::{ @@ -55,7 +57,7 @@ impl FromStr for Identifier { "s=" => Ok(UAString::from(v).into()), "g=" => Guid::from_str(v).map(|v| v.into()).map_err(|_| ()), "b=" => ByteString::from_base64(v).map(|v| v.into()).ok_or(()), - _ => Err(()) + _ => Err(()), } } } @@ -141,15 +143,9 @@ impl BinaryEncoder for NodeId { 7 } } - Identifier::String(ref value) => { - 3 + value.byte_len() - } - Identifier::Guid(ref value) => { - 3 + value.byte_len() - } - Identifier::ByteString(ref value) => { - 3 + value.byte_len() - } + Identifier::String(ref value) => 3 + value.byte_len(), + Identifier::Guid(ref value) => 3 + value.byte_len(), + Identifier::ByteString(ref value) => 3 + value.byte_len(), }; size } @@ -263,7 +259,8 @@ impl FromStr for NodeId { // Check namespace (optional) let namespace = if let Some(ns) = captures.name("ns") { - ns.as_str().parse::() + ns.as_str() + .parse::() .map_err(|_| StatusCode::BadNodeIdInvalid)? 
} else { 0 @@ -272,9 +269,7 @@ impl FromStr for NodeId { // Type identifier let t = captures.name("t").unwrap(); Identifier::from_str(t.as_str()) - .map(|t| { - NodeId::new(namespace, t) - }) + .map(|t| NodeId::new(namespace, t)) .map_err(|_| StatusCode::BadNodeIdInvalid) } } @@ -332,8 +327,14 @@ impl Default for NodeId { impl NodeId { // Constructs a new NodeId from anything that can be turned into Identifier // u32, Guid, ByteString or String - pub fn new(namespace: u16, value: T) -> NodeId where T: 'static + Into { - NodeId { namespace, identifier: value.into() } + pub fn new(namespace: u16, value: T) -> NodeId + where + T: 'static + Into, + { + NodeId { + namespace, + identifier: value.into(), + } } /// Returns the node id for the root folder. @@ -368,14 +369,17 @@ impl NodeId { // Creates a numeric node id with an id incrementing up from 1000 pub fn next_numeric(namespace: u16) -> NodeId { - NodeId::new(namespace, NEXT_NODE_ID_NUMERIC.fetch_add(1, Ordering::SeqCst) as u32) + NodeId::new( + namespace, + NEXT_NODE_ID_NUMERIC.fetch_add(1, Ordering::SeqCst) as u32, + ) } /// Extracts an ObjectId from a node id, providing the node id holds an object id pub fn as_object_id(&self) -> std::result::Result { match self.identifier { Identifier::Numeric(id) if self.namespace == 0 => ObjectId::try_from(id), - _ => Err(()) + _ => Err(()), } } @@ -384,11 +388,10 @@ impl NodeId { // types if self.is_null() { Err(()) - } - else { + } else { match self.identifier { Identifier::Numeric(id) if self.namespace == 0 => ReferenceTypeId::try_from(id), - _ => Err(()) + _ => Err(()), } } } @@ -542,8 +545,16 @@ impl BinaryEncoder for ExpandedNodeId { }; // Optional stuff - let namespace_uri = if data_encoding & 0x80 != 0 { UAString::decode(stream, decoding_limits)? } else { UAString::null() }; - let server_index = if data_encoding & 0x40 != 0 { u32::decode(stream, decoding_limits)? } else { 0 }; + let namespace_uri = if data_encoding & 0x80 != 0 { + UAString::decode(stream, decoding_limits)? + } else { + UAString::null() + }; + let server_index = if data_encoding & 0x40 != 0 { + u32::decode(stream, decoding_limits)? + } else { + 0 + }; Ok(ExpandedNodeId { node_id, @@ -581,7 +592,11 @@ impl fmt::Display for ExpandedNodeId { .replace("%", "%25") .replace(";", "%3b"); // svr=;nsu=;= - write!(f, "svr={};nsu={};{}", self.server_index, namespace_uri, self.node_id.identifier) + write!( + f, + "svr={};nsu={};{}", + self.server_index, namespace_uri, self.node_id.identifier + ) } } } @@ -605,10 +620,13 @@ impl FromStr for ExpandedNodeId { let captures = RE.captures(s).ok_or(StatusCode::BadNodeIdInvalid)?; // Server index - let server_index = captures.name("svr") + let server_index = captures + .name("svr") .ok_or(StatusCode::BadNodeIdInvalid) .and_then(|server_index| { - server_index.as_str().parse::() + server_index + .as_str() + .parse::() .map_err(|_| StatusCode::BadNodeIdInvalid) })?; @@ -624,7 +642,8 @@ impl FromStr for ExpandedNodeId { }; let namespace = if let Some(ns) = captures.name("ns") { - ns.as_str().parse::() + ns.as_str() + .parse::() .map_err(|_| StatusCode::BadNodeIdInvalid)? 
} else { 0 @@ -633,12 +652,10 @@ impl FromStr for ExpandedNodeId { // Type identifier let t = captures.name("t").unwrap(); Identifier::from_str(t.as_str()) - .map(|t| { - ExpandedNodeId { - server_index, - namespace_uri, - node_id: NodeId::new(namespace, t), - } + .map(|t| ExpandedNodeId { + server_index, + namespace_uri, + node_id: NodeId::new(namespace, t), }) .map_err(|_| StatusCode::BadNodeIdInvalid) } @@ -646,7 +663,10 @@ impl FromStr for ExpandedNodeId { impl ExpandedNodeId { /// Creates an expanded node id from a node id - pub fn new(value: T) -> ExpandedNodeId where T: 'static + Into { + pub fn new(value: T) -> ExpandedNodeId + where + T: 'static + Into, + { value.into() } @@ -657,4 +677,4 @@ impl ExpandedNodeId { pub fn is_null(&self) -> bool { self.node_id.is_null() } -} \ No newline at end of file +} diff --git a/types/src/notification_message.rs b/types/src/notification_message.rs index 0c746822a..4765a0656 100644 --- a/types/src/notification_message.rs +++ b/types/src/notification_message.rs @@ -3,7 +3,6 @@ // Copyright (C) 2017-2020 Adam Lock ///! Helpers for NotificationMessage types - use crate::{ date_time::DateTime, diagnostic_info::DiagnosticInfo, @@ -22,7 +21,12 @@ impl NotificationMessage { /// Create a notification message which contains data change AND / OR events. Calling this with /// neither will panic. Notification data can have up to 2 elements to covers the case in /// table 158 where a subscription contains monitored items for events and data. - pub fn data_change(sequence_number: u32, publish_time: DateTime, data_change_notifications: Vec, event_notifications: Vec) -> NotificationMessage { + pub fn data_change( + sequence_number: u32, + publish_time: DateTime, + data_change_notifications: Vec, + event_notifications: Vec, + ) -> NotificationMessage { if data_change_notifications.is_empty() && event_notifications.is_empty() { panic!("No notifications supplied to data_change()"); } @@ -34,14 +38,20 @@ impl NotificationMessage { diagnostic_infos: None, }; trace!("data change notification = {:?}", data_change_notification); - notification_data.push(ExtensionObject::from_encodable(ObjectId::DataChangeNotification_Encoding_DefaultBinary, &data_change_notification)); + notification_data.push(ExtensionObject::from_encodable( + ObjectId::DataChangeNotification_Encoding_DefaultBinary, + &data_change_notification, + )); } if !event_notifications.is_empty() { let event_notification_list = EventNotificationList { - events: Some(event_notifications) + events: Some(event_notifications), }; trace!("event notification = {:?}", event_notification_list); - notification_data.push(ExtensionObject::from_encodable(ObjectId::EventNotificationList_Encoding_DefaultBinary, &event_notification_list)); + notification_data.push(ExtensionObject::from_encodable( + ObjectId::EventNotificationList_Encoding_DefaultBinary, + &event_notification_list, + )); } // Both data and events are serialized @@ -52,12 +62,19 @@ impl NotificationMessage { } } /// Create a status change notification message - pub fn status_change(sequence_number: u32, publish_time: DateTime, status: StatusCode) -> NotificationMessage { + pub fn status_change( + sequence_number: u32, + publish_time: DateTime, + status: StatusCode, + ) -> NotificationMessage { let status_change_notification = StatusChangeNotification { status, diagnostic_info: DiagnosticInfo::null(), }; - let notification_data = ExtensionObject::from_encodable(ObjectId::StatusChangeNotification_Encoding_DefaultBinary, &status_change_notification); + let 
notification_data = ExtensionObject::from_encodable( + ObjectId::StatusChangeNotification_Encoding_DefaultBinary, + &status_change_notification, + ); NotificationMessage { sequence_number, publish_time, @@ -76,10 +93,15 @@ impl NotificationMessage { /// Extract notifications from the message. Unrecognized / unparseable notifications will be /// ignored. If there are no notifications, the function will return `None`. - pub fn notifications(&self, decoding_limits: &DecodingLimits) -> Option<(Vec, Vec)> { + pub fn notifications( + &self, + decoding_limits: &DecodingLimits, + ) -> Option<(Vec, Vec)> { if let Some(ref notification_data) = self.notification_data { - let data_change_notification_id: NodeId = ObjectId::DataChangeNotification_Encoding_DefaultBinary.into(); - let event_notification_list_id: NodeId = ObjectId::EventNotificationList_Encoding_DefaultBinary.into(); + let data_change_notification_id: NodeId = + ObjectId::DataChangeNotification_Encoding_DefaultBinary.into(); + let event_notification_list_id: NodeId = + ObjectId::EventNotificationList_Encoding_DefaultBinary.into(); let mut data_changes = Vec::with_capacity(notification_data.len()); let mut events = Vec::with_capacity(notification_data.len()); diff --git a/types/src/numeric_range.rs b/types/src/numeric_range.rs index c13eb6d81..c5fa11bc0 100644 --- a/types/src/numeric_range.rs +++ b/types/src/numeric_range.rs @@ -64,24 +64,32 @@ fn valid_numeric_ranges() { ("4294967295", NumericRange::Index(4294967295), "4294967295"), ("1:2", NumericRange::Range(1, 2), "1:2"), ("2:3", NumericRange::Range(2, 3), "2:3"), - ("0:1,0:2,0:3,0:4,0:5", NumericRange::MultipleRanges(vec![ - NumericRange::Range(0, 1), - NumericRange::Range(0, 2), - NumericRange::Range(0, 3), - NumericRange::Range(0, 4), - NumericRange::Range(0, 5) - ]), "0:1,0:2,0:3,0:4,0:5"), - ("0:1,2,3,0:4,5,6,7,8,0:9", NumericRange::MultipleRanges(vec![ - NumericRange::Range(0, 1), - NumericRange::Index(2), - NumericRange::Index(3), - NumericRange::Range(0, 4), - NumericRange::Index(5), - NumericRange::Index(6), - NumericRange::Index(7), - NumericRange::Index(8), - NumericRange::Range(0, 9) - ]), "0:1,2,3,0:4,5,6,7,8,0:9") + ( + "0:1,0:2,0:3,0:4,0:5", + NumericRange::MultipleRanges(vec![ + NumericRange::Range(0, 1), + NumericRange::Range(0, 2), + NumericRange::Range(0, 3), + NumericRange::Range(0, 4), + NumericRange::Range(0, 5), + ]), + "0:1,0:2,0:3,0:4,0:5", + ), + ( + "0:1,2,3,0:4,5,6,7,8,0:9", + NumericRange::MultipleRanges(vec![ + NumericRange::Range(0, 1), + NumericRange::Index(2), + NumericRange::Index(3), + NumericRange::Range(0, 4), + NumericRange::Index(5), + NumericRange::Index(6), + NumericRange::Index(7), + NumericRange::Index(8), + NumericRange::Range(0, 9), + ]), + "0:1,2,3,0:4,5,6,7,8,0:9", + ), ]; for vr in valid_ranges { let range = vr.0.parse::(); @@ -99,9 +107,28 @@ fn invalid_numeric_ranges() { // Invalid values are either malformed, contain a min >= max, or they exceed limits on size of numbers // or number of indices. 
let invalid_ranges = vec![ - " ", " 1", "1 ", ":", ":1", "1:1", "2:1", "0:1,2,3,4:4", "1:", "1:1:2", ",", ":,", ",:", - ",1", "1,", "1,2,", "1,,2", "01234567890", "0,1,2,3,4,5,6,7,8,9,10", - "4294967296", "0:4294967296", "4294967296:0" + " ", + " 1", + "1 ", + ":", + ":1", + "1:1", + "2:1", + "0:1,2,3,4:4", + "1:", + "1:1:2", + ",", + ":,", + ",:", + ",1", + "1,", + "1,2,", + "1,,2", + "01234567890", + "0,1,2,3,4,5,6,7,8,9,10", + "4294967296", + "0:4294967296", + "4294967296:0", ]; for vr in invalid_ranges { println!("vr = {}", vr); @@ -150,7 +177,10 @@ impl FromStr for NumericRange { } impl NumericRange { - pub fn new(s: T) -> Result where T: Into { + pub fn new(s: T) -> Result + where + T: Into, + { Self::from_str(s.into().as_ref()) } @@ -182,18 +212,19 @@ impl NumericRange { // To stop insane values, a number must be 10 digits (sufficient for any permissible // 32-bit value) or less regardless of leading zeroes. lazy_static! { - static ref RE: Regex = Regex::new("^(?P[0-9]{1,10})(:(?P[0-9]{1,10}))?$").unwrap(); + static ref RE: Regex = + Regex::new("^(?P[0-9]{1,10})(:(?P[0-9]{1,10}))?$").unwrap(); } if let Some(captures) = RE.captures(s) { let min = captures.name("min"); let max = captures.name("max"); match (min, max) { (None, None) | (None, Some(_)) => Err(()), - (Some(min), None) => { - min.as_str().parse::() - .map(|min| NumericRange::Index(min)) - .map_err(|_| ()) - } + (Some(min), None) => min + .as_str() + .parse::() + .map(|min| NumericRange::Index(min)) + .map_err(|_| ()), (Some(min), Some(max)) => { // Parse as 64-bit but cast down if let Ok(min) = min.as_str().parse::() { @@ -225,13 +256,13 @@ impl NumericRange { match self { NumericRange::None => true, NumericRange::Index(_) => true, - NumericRange::Range(min, max) => { min < max } + NumericRange::Range(min, max) => min < max, NumericRange::MultipleRanges(ref ranges) => { let found_invalid = ranges.iter().any(|r| { // Nested multiple ranges are not allowed match r { NumericRange::MultipleRanges(_) => true, - r => !r.is_valid() + r => !r.is_valid(), } }); !found_invalid diff --git a/types/src/operand.rs b/types/src/operand.rs index 1e966a702..62ace5a41 100644 --- a/types/src/operand.rs +++ b/types/src/operand.rs @@ -5,14 +5,14 @@ use std::convert::TryFrom; use crate::{ - attribute::AttributeId, DecodingLimits, ExtensionObject, node_ids::ObjectId, NodeId, QualifiedName, + attribute::AttributeId, + node_ids::ObjectId, service_types::{ - AttributeOperand, ContentFilter, ContentFilterElement, ElementOperand, - FilterOperator, LiteralOperand, SimpleAttributeOperand, + AttributeOperand, ContentFilter, ContentFilterElement, ElementOperand, FilterOperator, + LiteralOperand, SimpleAttributeOperand, }, status_code::StatusCode, - UAString, - Variant, + DecodingLimits, ExtensionObject, NodeId, QualifiedName, UAString, Variant, }; #[derive(PartialEq)] @@ -106,17 +106,25 @@ impl TryFrom<&ExtensionObject> for Operand { type Error = StatusCode; fn try_from(v: &ExtensionObject) -> Result { - let object_id = v.object_id().map_err(|_| StatusCode::BadFilterOperandInvalid)?; + let object_id = v + .object_id() + .map_err(|_| StatusCode::BadFilterOperandInvalid)?; let decoding_limits = DecodingLimits::default(); let operand = match object_id { - ObjectId::ElementOperand_Encoding_DefaultBinary => - Operand::ElementOperand(v.decode_inner::(&decoding_limits)?), - ObjectId::LiteralOperand_Encoding_DefaultBinary => - Operand::LiteralOperand(v.decode_inner::(&decoding_limits)?), - ObjectId::AttributeOperand_Encoding_DefaultBinary => - 
Operand::AttributeOperand(v.decode_inner::(&decoding_limits)?), - ObjectId::SimpleAttributeOperand_Encoding_DefaultBinary => - Operand::SimpleAttributeOperand(v.decode_inner::(&decoding_limits)?), + ObjectId::ElementOperand_Encoding_DefaultBinary => { + Operand::ElementOperand(v.decode_inner::(&decoding_limits)?) + } + ObjectId::LiteralOperand_Encoding_DefaultBinary => { + Operand::LiteralOperand(v.decode_inner::(&decoding_limits)?) + } + ObjectId::AttributeOperand_Encoding_DefaultBinary => { + Operand::AttributeOperand(v.decode_inner::(&decoding_limits)?) + } + ObjectId::SimpleAttributeOperand_Encoding_DefaultBinary => { + Operand::SimpleAttributeOperand( + v.decode_inner::(&decoding_limits)?, + ) + } _ => { return Err(StatusCode::BadFilterOperandInvalid); } @@ -128,10 +136,20 @@ impl TryFrom<&ExtensionObject> for Operand { impl From<&Operand> for ExtensionObject { fn from(v: &Operand) -> Self { match v { - Operand::ElementOperand(ref op) => ExtensionObject::from_encodable(ObjectId::ElementOperand_Encoding_DefaultBinary, op), - Operand::LiteralOperand(ref op) => ExtensionObject::from_encodable(ObjectId::LiteralOperand_Encoding_DefaultBinary, op), - Operand::AttributeOperand(ref op) => ExtensionObject::from_encodable(ObjectId::AttributeOperand_Encoding_DefaultBinary, op), - Operand::SimpleAttributeOperand(ref op) => ExtensionObject::from_encodable(ObjectId::SimpleAttributeOperand_Encoding_DefaultBinary, op), + Operand::ElementOperand(ref op) => { + ExtensionObject::from_encodable(ObjectId::ElementOperand_Encoding_DefaultBinary, op) + } + Operand::LiteralOperand(ref op) => { + ExtensionObject::from_encodable(ObjectId::LiteralOperand_Encoding_DefaultBinary, op) + } + Operand::AttributeOperand(ref op) => ExtensionObject::from_encodable( + ObjectId::AttributeOperand_Encoding_DefaultBinary, + op, + ), + Operand::SimpleAttributeOperand(ref op) => ExtensionObject::from_encodable( + ObjectId::SimpleAttributeOperand_Encoding_DefaultBinary, + op, + ), } } } @@ -174,15 +192,25 @@ impl Operand { ElementOperand { index }.into() } - pub fn literal(literal: T) -> Operand where T: Into { + pub fn literal(literal: T) -> Operand + where + T: Into, + { Operand::LiteralOperand(literal.into()) } /// Creates a simple attribute operand. The browse path is the browse name using / as a separator. - pub fn simple_attribute(type_definition_id: T, browse_path: &str, attribute_id: AttributeId, index_range: UAString) -> Operand - where T: Into + pub fn simple_attribute( + type_definition_id: T, + browse_path: &str, + attribute_id: AttributeId, + index_range: UAString, + ) -> Operand + where + T: Into, { - SimpleAttributeOperand::new(type_definition_id, browse_path, attribute_id, index_range).into() + SimpleAttributeOperand::new(type_definition_id, browse_path, attribute_id, index_range) + .into() } pub fn operand_type(&self) -> OperandType { @@ -190,7 +218,7 @@ impl Operand { Operand::ElementOperand(_) => OperandType::ElementOperand, Operand::LiteralOperand(_) => OperandType::LiteralOperand, Operand::AttributeOperand(_) => OperandType::AttributeOperand, - Operand::SimpleAttributeOperand(_) => OperandType::SimpleAttributeOperand + Operand::SimpleAttributeOperand(_) => OperandType::SimpleAttributeOperand, } } @@ -219,18 +247,25 @@ impl Operand { /// The builder takes generic types to make it easier to work with. Operands are converted to /// extension objects. 
pub struct ContentFilterBuilder { - elements: Vec + elements: Vec, } impl ContentFilterBuilder { pub fn new() -> Self { ContentFilterBuilder { - elements: Vec::with_capacity(20) + elements: Vec::with_capacity(20), } } - fn add_element(mut self, filter_operator: FilterOperator, filter_operands: Vec) -> Self { - let filter_operands = filter_operands.iter().map(|o| ExtensionObject::from(o)).collect(); + fn add_element( + mut self, + filter_operator: FilterOperator, + filter_operands: Vec, + ) -> Self { + let filter_operands = filter_operands + .iter() + .map(|o| ExtensionObject::from(o)) + .collect(); self.elements.push(ContentFilterElement { filter_operator, filter_operands: Some(filter_operands), @@ -239,60 +274,87 @@ impl ContentFilterBuilder { } pub fn is_eq(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::Equals, vec![o1.into(), o2.into()]) } - pub fn is_null(self, o1: T) -> Self where T: Into { + pub fn is_null(self, o1: T) -> Self + where + T: Into, + { self.add_element(FilterOperator::IsNull, vec![o1.into()]) } pub fn is_gt(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::GreaterThan, vec![o1.into(), o2.into()]) } pub fn is_lt(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::LessThan, vec![o1.into(), o2.into()]) } pub fn is_gte(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { - self.add_element(FilterOperator::GreaterThanOrEqual, vec![o1.into(), o2.into()]) + where + T: Into, + S: Into, + { + self.add_element( + FilterOperator::GreaterThanOrEqual, + vec![o1.into(), o2.into()], + ) } pub fn is_lte(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::LessThanOrEqual, vec![o1.into(), o2.into()]) } pub fn is_like(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::Like, vec![o1.into(), o2.into()]) } pub fn not(self, o1: T) -> Self - where T: Into { + where + T: Into, + { self.add_element(FilterOperator::Not, vec![o1.into()]) } pub fn is_between(self, o1: T, o2: S, o3: U) -> Self - where T: Into, - S: Into, - U: Into { - self.add_element(FilterOperator::Between, vec![o1.into(), o2.into(), o3.into()]) + where + T: Into, + S: Into, + U: Into, + { + self.add_element( + FilterOperator::Between, + vec![o1.into(), o2.into(), o3.into()], + ) } pub fn is_in_list(self, o1: T, list_items: Vec) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { // Make a list from the operand and then the items let mut filter_operands = Vec::with_capacity(list_items.len() + 1); filter_operands.push(o1.into()); @@ -303,45 +365,61 @@ impl ContentFilterBuilder { } pub fn and(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::And, vec![o1.into(), o2.into()]) } pub fn or(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::Or, vec![o1.into(), o2.into()]) } pub fn cast(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::Cast, vec![o1.into(), o2.into()]) } pub fn bitwise_and(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::BitwiseAnd, vec![o1.into(), 
o2.into()]) } pub fn bitwise_or(self, o1: T, o2: S) -> Self - where T: Into, - S: Into { + where + T: Into, + S: Into, + { self.add_element(FilterOperator::BitwiseOr, vec![o1.into(), o2.into()]) } pub fn build(self) -> ContentFilter { ContentFilter { - elements: Some(self.elements) + elements: Some(self.elements), } } } impl SimpleAttributeOperand { - pub fn new(type_definition_id: T, browse_path: &str, attribute_id: AttributeId, index_range: UAString) -> Self - where T: Into + pub fn new( + type_definition_id: T, + browse_path: &str, + attribute_id: AttributeId, + index_range: UAString, + ) -> Self + where + T: Into, { // An improbable string to replace escaped forward slashes. const ESCAPE_PATTERN: &str = "###!!!###@@@$$$$"; @@ -350,7 +428,10 @@ impl SimpleAttributeOperand { // If we had a regex with look around support then we could split a pattern such as `r"(? for QualifiedName { } impl QualifiedName { - pub fn new(namespace_index: u16, name: T) -> QualifiedName where T: Into { + pub fn new(namespace_index: u16, name: T) -> QualifiedName + where + T: Into, + { QualifiedName { namespace_index, name: name.into(), diff --git a/types/src/relative_path.rs b/types/src/relative_path.rs index 64cd48011..7a93b0c24 100644 --- a/types/src/relative_path.rs +++ b/types/src/relative_path.rs @@ -27,7 +27,9 @@ impl RelativePath { /// be used to look up nodes from their browse name. The function will reject strings /// that look unusually long or contain too many elements. pub fn from_str(path: &str, node_resolver: &CB) -> Result - where CB: Fn(u16, &str) -> Option { + where + CB: Fn(u16, &str) -> Option, + { let mut elements: Vec = Vec::new(); // This loop will break the string up into path segments. For each segment it will @@ -75,19 +77,23 @@ impl RelativePath { } Ok(RelativePath { - elements: Some(elements) + elements: Some(elements), }) } } impl<'a> From<&'a RelativePathElement> for String { fn from(element: &'a RelativePathElement) -> String { - let mut result = element.relative_path_reference_type(&RelativePathElement::default_browse_name_resolver); + let mut result = element + .relative_path_reference_type(&RelativePathElement::default_browse_name_resolver); if !element.target_name.name.is_null() { let always_use_namespace = true; let target_browse_name = escape_browse_name(element.target_name.name.as_ref()); if always_use_namespace || element.target_name.namespace_index > 0 { - result.push_str(&format!("{}:{}", element.target_name.namespace_index, target_browse_name)); + result.push_str(&format!( + "{}:{}", + element.target_name.namespace_index, target_browse_name + )); } else { result.push_str(&target_browse_name); } @@ -133,9 +139,7 @@ impl RelativePathElement { "HasTrueSubState" => ReferenceTypeId::HasTrueSubState.into(), "HasFalseSubState" => ReferenceTypeId::HasFalseSubState.into(), "HasCondition" => ReferenceTypeId::HasCondition.into(), - _ => { - NodeId::new(0, UAString::from(browse_name)) - } + _ => NodeId::new(0, UAString::from(browse_name)), } } else { NodeId::new(namespace, UAString::from(browse_name)) @@ -145,36 +149,45 @@ impl RelativePathElement { fn id_from_reference_type(id: u32) -> Option { // This syntax is horrible - it casts the u32 into an enum if it can - Some(match id { - id if id == ReferenceTypeId::References as u32 => "References", - id if id == ReferenceTypeId::NonHierarchicalReferences as u32 => "NonHierarchicalReferences", - id if id == ReferenceTypeId::HierarchicalReferences as u32 => "HierarchicalReferences", - id if id == ReferenceTypeId::HasChild as u32 => 
"HasChild", - id if id == ReferenceTypeId::Organizes as u32 => "Organizes", - id if id == ReferenceTypeId::HasEventSource as u32 => "HasEventSource", - id if id == ReferenceTypeId::HasModellingRule as u32 => "HasModellingRule", - id if id == ReferenceTypeId::HasEncoding as u32 => "HasEncoding", - id if id == ReferenceTypeId::HasDescription as u32 => "HasDescription", - id if id == ReferenceTypeId::HasTypeDefinition as u32 => "HasTypeDefinition", - id if id == ReferenceTypeId::GeneratesEvent as u32 => "GeneratesEvent", - id if id == ReferenceTypeId::Aggregates as u32 => "Aggregates", - id if id == ReferenceTypeId::HasSubtype as u32 => "HasSubtype", - id if id == ReferenceTypeId::HasProperty as u32 => "HasProperty", - id if id == ReferenceTypeId::HasComponent as u32 => "HasComponent", - id if id == ReferenceTypeId::HasNotifier as u32 => "HasNotifier", - id if id == ReferenceTypeId::HasOrderedComponent as u32 => "HasOrderedComponent", - id if id == ReferenceTypeId::FromState as u32 => "FromState", - id if id == ReferenceTypeId::ToState as u32 => "ToState", - id if id == ReferenceTypeId::HasCause as u32 => "HasCause", - id if id == ReferenceTypeId::HasEffect as u32 => "HasEffect", - id if id == ReferenceTypeId::HasHistoricalConfiguration as u32 => "HasHistoricalConfiguration", - id if id == ReferenceTypeId::HasSubStateMachine as u32 => "HasSubStateMachine", - id if id == ReferenceTypeId::AlwaysGeneratesEvent as u32 => "AlwaysGeneratesEvent", - id if id == ReferenceTypeId::HasTrueSubState as u32 => "HasTrueSubState", - id if id == ReferenceTypeId::HasFalseSubState as u32 => "HasFalseSubState", - id if id == ReferenceTypeId::HasCondition as u32 => "HasCondition", - _ => return None - }.to_string()) + Some( + match id { + id if id == ReferenceTypeId::References as u32 => "References", + id if id == ReferenceTypeId::NonHierarchicalReferences as u32 => { + "NonHierarchicalReferences" + } + id if id == ReferenceTypeId::HierarchicalReferences as u32 => { + "HierarchicalReferences" + } + id if id == ReferenceTypeId::HasChild as u32 => "HasChild", + id if id == ReferenceTypeId::Organizes as u32 => "Organizes", + id if id == ReferenceTypeId::HasEventSource as u32 => "HasEventSource", + id if id == ReferenceTypeId::HasModellingRule as u32 => "HasModellingRule", + id if id == ReferenceTypeId::HasEncoding as u32 => "HasEncoding", + id if id == ReferenceTypeId::HasDescription as u32 => "HasDescription", + id if id == ReferenceTypeId::HasTypeDefinition as u32 => "HasTypeDefinition", + id if id == ReferenceTypeId::GeneratesEvent as u32 => "GeneratesEvent", + id if id == ReferenceTypeId::Aggregates as u32 => "Aggregates", + id if id == ReferenceTypeId::HasSubtype as u32 => "HasSubtype", + id if id == ReferenceTypeId::HasProperty as u32 => "HasProperty", + id if id == ReferenceTypeId::HasComponent as u32 => "HasComponent", + id if id == ReferenceTypeId::HasNotifier as u32 => "HasNotifier", + id if id == ReferenceTypeId::HasOrderedComponent as u32 => "HasOrderedComponent", + id if id == ReferenceTypeId::FromState as u32 => "FromState", + id if id == ReferenceTypeId::ToState as u32 => "ToState", + id if id == ReferenceTypeId::HasCause as u32 => "HasCause", + id if id == ReferenceTypeId::HasEffect as u32 => "HasEffect", + id if id == ReferenceTypeId::HasHistoricalConfiguration as u32 => { + "HasHistoricalConfiguration" + } + id if id == ReferenceTypeId::HasSubStateMachine as u32 => "HasSubStateMachine", + id if id == ReferenceTypeId::AlwaysGeneratesEvent as u32 => "AlwaysGeneratesEvent", + id if id == 
ReferenceTypeId::HasTrueSubState as u32 => "HasTrueSubState", + id if id == ReferenceTypeId::HasFalseSubState as u32 => "HasFalseSubState", + id if id == ReferenceTypeId::HasCondition as u32 => "HasCondition", + _ => return None, + } + .to_string(), + ) } pub fn default_browse_name_resolver(node_id: &NodeId) -> Option { @@ -187,7 +200,7 @@ impl RelativePathElement { None } } - _ => None + _ => None, } } @@ -212,7 +225,9 @@ impl RelativePathElement { /// * `<#!2:MyReftype>2:blah` /// pub fn from_str(path: &str, node_resolver: &CB) -> Result - where CB: Fn(u16, &str) -> Option { + where + CB: Fn(u16, &str) -> Option, + { lazy_static! { static ref RE: Regex = Regex::new(r"(?P/|\.|(<(?P#|!|#!)?((?P[0-9]+):)?(?P[^#!].*)>))(?P.*)").unwrap(); } @@ -227,12 +242,13 @@ impl RelativePathElement { "/" => (ReferenceTypeId::HierarchicalReferences.into(), true, false), "." => (ReferenceTypeId::Aggregates.into(), true, false), _ => { - let (include_subtypes, is_inverse) = if let Some(flags) = captures.name("flags") { + let (include_subtypes, is_inverse) = if let Some(flags) = captures.name("flags") + { match flags.as_str() { "#" => (false, false), "!" => (true, true), "#!" => (false, true), - _ => panic!("Error in regular expression for flags") + _ => panic!("Error in regular expression for flags"), } } else { (true, false) @@ -255,7 +271,10 @@ impl RelativePathElement { node_resolver(0, browse_name) }; if reference_type_id.is_none() { - error!("Supplied node resolver was unable to resolve a reference type from {}", path); + error!( + "Supplied node resolver was unable to resolve a reference type from {}", + path + ); return Err(()); } (reference_type_id.unwrap(), include_subtypes, is_inverse) @@ -277,7 +296,9 @@ impl RelativePathElement { /// This code assumes that the reference type's node id has a string identifier and that /// the string identifier is the same as the browse name. pub(crate) fn relative_path_reference_type(&self, browse_name_resolver: &CB) -> String - where CB: Fn(&NodeId) -> Option { + where + CB: Fn(&NodeId) -> Option, + { let browse_name = browse_name_resolver(&self.reference_type_id).unwrap(); let mut result = String::with_capacity(1024); // Common references will come out as '/' or '.' 
@@ -300,7 +321,10 @@ impl RelativePathElement { let browse_name = escape_browse_name(browse_name.as_ref()); if self.reference_type_id.namespace != 0 { - result.push_str(&format!("{}:{}", self.reference_type_id.namespace, browse_name)); + result.push_str(&format!( + "{}:{}", + self.reference_type_id.namespace, browse_name + )); } else { result.push_str(&browse_name); } @@ -317,7 +341,7 @@ impl<'a> From<&'a RelativePath> for String { let mut result = String::with_capacity(1024); for e in elements.iter() { result.push_str(String::from(e).as_ref()); - }; + } result } else { String::new() @@ -363,7 +387,10 @@ fn target_name(target_name: &str) -> Result { if let Ok(namespace) = namespace.as_str().parse::() { namespace } else { - error!("Namespace {} for target name is out of range", namespace.as_str()); + error!( + "Namespace {} for target name is out of range", + namespace.as_str() + ); return Err(()); } } else { @@ -398,7 +425,9 @@ fn test_escape_browse_name() { (".Name_2", "&.Name_2"), (":Name_3", "&:Name_3"), ("&Name_4", "&&Name_4"), - ].iter().for_each(|n| { + ] + .iter() + .for_each(|n| { let original = n.0.to_string(); let escaped = n.1.to_string(); assert_eq!(escaped, escape_browse_name(&original)); @@ -413,43 +442,63 @@ fn test_relative_path_element() { use crate::qualified_name::QualifiedName; [ - (RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, "foo1"), - }, "/0:foo1"), - (RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, ".foo2"), - }, "/0:&.foo2"), - (RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: true, - include_subtypes: true, - target_name: QualifiedName::new(2, "foo3"), - }, "2:foo3"), - (RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: true, - include_subtypes: false, - target_name: QualifiedName::new(0, "foo4"), - }, "<#!HierarchicalReferences>0:foo4"), - (RelativePathElement { - reference_type_id: ReferenceTypeId::Aggregates.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, "foo5"), - }, ".0:foo5"), - (RelativePathElement { - reference_type_id: ReferenceTypeId::HasHistoricalConfiguration.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, "foo6"), - }, "0:foo6"), - ].iter().for_each(|n| { + ( + RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(0, "foo1"), + }, + "/0:foo1", + ), + ( + RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(0, ".foo2"), + }, + "/0:&.foo2", + ), + ( + RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: true, + include_subtypes: true, + target_name: QualifiedName::new(2, "foo3"), + }, + "2:foo3", + ), + ( + RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: true, + include_subtypes: false, + target_name: QualifiedName::new(0, "foo4"), + }, + "<#!HierarchicalReferences>0:foo4", + ), + ( + RelativePathElement { + reference_type_id: ReferenceTypeId::Aggregates.into(), + 
is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(0, "foo5"), + }, + ".0:foo5", + ), + ( + RelativePathElement { + reference_type_id: ReferenceTypeId::HasHistoricalConfiguration.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(0, "foo6"), + }, + "0:foo6", + ), + ] + .iter() + .for_each(|n| { let element = &n.0; let expected = n.1.to_string(); @@ -458,7 +507,9 @@ fn test_relative_path_element() { assert_eq!(expected, actual); // Turn string back to element, compare to original element - let actual = RelativePathElement::from_str(&actual, &RelativePathElement::default_node_resolver).unwrap(); + let actual = + RelativePathElement::from_str(&actual, &RelativePathElement::default_node_resolver) + .unwrap(); assert_eq!(*element, actual); }); } @@ -471,85 +522,98 @@ fn test_relative_path() { // Samples are from OPC UA Part 4 Appendix A let tests = vec![ - (vec![ - RelativePathElement { + ( + vec![RelativePathElement { reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), is_inverse: false, include_subtypes: true, target_name: QualifiedName::new(2, "Block.Output"), - } - ], "/2:Block&.Output"), - (vec![ - RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(3, "Truck"), - }, - RelativePathElement { - reference_type_id: ReferenceTypeId::Aggregates.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(0, "NodeVersion"), }], - "/3:Truck.0:NodeVersion"), - (vec![ - RelativePathElement { - reference_type_id: NodeId::new(1, "ConnectedTo"), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(1, "Boiler"), - }, - RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(1, "HeatSensor"), - }], - "<1:ConnectedTo>1:Boiler/1:HeatSensor"), - (vec![ - RelativePathElement { - reference_type_id: NodeId::new(1, "ConnectedTo"), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::new(1, "Boiler"), - }, - RelativePathElement { - reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), - is_inverse: false, - include_subtypes: true, - target_name: QualifiedName::null(), - }], - "<1:ConnectedTo>1:Boiler/"), - (vec![ - RelativePathElement { + "/2:Block&.Output", + ), + ( + vec![ + RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(3, "Truck"), + }, + RelativePathElement { + reference_type_id: ReferenceTypeId::Aggregates.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(0, "NodeVersion"), + }, + ], + "/3:Truck.0:NodeVersion", + ), + ( + vec![ + RelativePathElement { + reference_type_id: NodeId::new(1, "ConnectedTo"), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(1, "Boiler"), + }, + RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(1, "HeatSensor"), + }, + ], + "<1:ConnectedTo>1:Boiler/1:HeatSensor", + ), + ( + vec![ + RelativePathElement { + reference_type_id: NodeId::new(1, "ConnectedTo"), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::new(1, "Boiler"), + }, + 
RelativePathElement { + reference_type_id: ReferenceTypeId::HierarchicalReferences.into(), + is_inverse: false, + include_subtypes: true, + target_name: QualifiedName::null(), + }, + ], + "<1:ConnectedTo>1:Boiler/", + ), + ( + vec![RelativePathElement { reference_type_id: ReferenceTypeId::HasChild.into(), is_inverse: false, include_subtypes: true, target_name: QualifiedName::new(2, "Wheel"), - }, - ], "2:Wheel"), - (vec![ - RelativePathElement { + }], + "2:Wheel", + ), + ( + vec![RelativePathElement { reference_type_id: ReferenceTypeId::HasChild.into(), is_inverse: true, include_subtypes: true, target_name: QualifiedName::new(0, "Truck"), - }, - ], "0:Truck"), - (vec![ - RelativePathElement { + }], + "0:Truck", + ), + ( + vec![RelativePathElement { reference_type_id: ReferenceTypeId::HasChild.into(), is_inverse: false, include_subtypes: true, target_name: QualifiedName::null(), - }, - ], ""), + }], + "", + ), ]; tests.into_iter().for_each(|n| { let relative_path = RelativePath { - elements: Some(n.0) + elements: Some(n.0), }; let expected = n.1.to_string(); @@ -558,7 +622,8 @@ fn test_relative_path() { assert_eq!(expected, actual); // Turn string back to element, compare to original path - let actual = RelativePath::from_str(&actual, &RelativePathElement::default_node_resolver).unwrap(); + let actual = + RelativePath::from_str(&actual, &RelativePathElement::default_node_resolver).unwrap(); assert_eq!(relative_path, actual); }); } diff --git a/types/src/request_header.rs b/types/src/request_header.rs index d650ecb5c..527e52cb7 100644 --- a/types/src/request_header.rs +++ b/types/src/request_header.rs @@ -2,16 +2,14 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use std::{self, io::{Read, Write}}; +use std::{ + self, + io::{Read, Write}, +}; use crate::{ - data_types::*, - date_time::DateTime, - diagnostic_info::DiagnosticBits, - encoding::*, - extension_object::ExtensionObject, - node_id::NodeId, - string::UAString, + data_types::*, date_time::DateTime, diagnostic_info::DiagnosticBits, encoding::*, + extension_object::ExtensionObject, node_id::NodeId, string::UAString, }; /// The `RequestHeader` contains information common to every request from a client to the server. 
@@ -117,7 +115,8 @@ impl BinaryEncoder for RequestHeader { let authentication_token = NodeId::decode(stream, decoding_limits)?; let timestamp = UtcTime::decode(stream, decoding_limits)?; let request_handle = IntegerId::decode(stream, decoding_limits)?; - let return_diagnostics = DiagnosticBits::from_bits_truncate(u32::decode(stream, decoding_limits)?); + let return_diagnostics = + DiagnosticBits::from_bits_truncate(u32::decode(stream, decoding_limits)?); let audit_entry_id = UAString::decode(stream, decoding_limits)?; let timeout_hint = u32::decode(stream, decoding_limits)?; let additional_header = ExtensionObject::decode(stream, decoding_limits)?; @@ -134,7 +133,11 @@ impl BinaryEncoder for RequestHeader { } impl RequestHeader { - pub fn new(authentication_token: &NodeId, timestamp: &DateTime, request_handle: IntegerId) -> RequestHeader { + pub fn new( + authentication_token: &NodeId, + timestamp: &DateTime, + request_handle: IntegerId, + ) -> RequestHeader { RequestHeader { authentication_token: authentication_token.clone(), timestamp: timestamp.clone(), diff --git a/types/src/response_header.rs b/types/src/response_header.rs index 32f0e1765..58ce41524 100644 --- a/types/src/response_header.rs +++ b/types/src/response_header.rs @@ -2,16 +2,14 @@ // SPDX-License-Identifier: MPL-2.0 // Copyright (C) 2017-2020 Adam Lock -use std::{self, io::{Read, Write}}; +use std::{ + self, + io::{Read, Write}, +}; use crate::{ - data_types::*, - date_time::DateTime, - diagnostic_info::DiagnosticInfo, - encoding::*, - extension_object::ExtensionObject, - request_header::RequestHeader, - status_codes::StatusCode, + data_types::*, date_time::DateTime, diagnostic_info::DiagnosticInfo, encoding::*, + extension_object::ExtensionObject, request_header::RequestHeader, status_codes::StatusCode, string::UAString, }; @@ -73,11 +71,22 @@ impl ResponseHeader { ResponseHeader::new_service_result(request_header, StatusCode::Good) } - pub fn new_service_result(request_header: &RequestHeader, service_result: StatusCode) -> ResponseHeader { - ResponseHeader::new_timestamped_service_result(DateTime::now(), request_header, service_result) + pub fn new_service_result( + request_header: &RequestHeader, + service_result: StatusCode, + ) -> ResponseHeader { + ResponseHeader::new_timestamped_service_result( + DateTime::now(), + request_header, + service_result, + ) } - pub fn new_timestamped_service_result(timestamp: DateTime, request_header: &RequestHeader, service_result: StatusCode) -> ResponseHeader { + pub fn new_timestamped_service_result( + timestamp: DateTime, + request_header: &RequestHeader, + service_result: StatusCode, + ) -> ResponseHeader { ResponseHeader { timestamp, request_handle: request_header.request_handle, diff --git a/types/src/status_code.rs b/types/src/status_code.rs index 04eb365e1..b02a09b1f 100644 --- a/types/src/status_code.rs +++ b/types/src/status_code.rs @@ -6,12 +6,14 @@ //! the machine generated part. 
use std::{ - io::{self, Read, Write}, fmt, fmt::Formatter, + fmt, + fmt::Formatter, + io::{self, Read, Write}, }; use serde::{ - Serialize, Serializer, Deserialize, Deserializer, de::{self, Visitor}, + Deserialize, Deserializer, Serialize, Serializer, }; pub use crate::{encoding::*, status_codes::StatusCode}; @@ -98,7 +100,8 @@ impl From for io::Error { impl Serialize for StatusCode { fn serialize(&self, serializer: S) -> Result - where S: Serializer, + where + S: Serializer, { serializer.serialize_u32(self.bits()) } @@ -114,8 +117,8 @@ impl<'de> Visitor<'de> for StatusCodeVisitor { } fn visit_u32(self, value: u32) -> Result - where - E: de::Error, + where + E: de::Error, { Ok(value) } @@ -123,8 +126,12 @@ impl<'de> Visitor<'de> for StatusCodeVisitor { impl<'de> Deserialize<'de> for StatusCode { fn deserialize(deserializer: D) -> Result>::Error> - where D: Deserializer<'de> { - Ok(StatusCode::from_bits_truncate(deserializer.deserialize_u32(StatusCodeVisitor)?)) + where + D: Deserializer<'de>, + { + Ok(StatusCode::from_bits_truncate( + deserializer.deserialize_u32(StatusCodeVisitor)?, + )) } } @@ -142,6 +149,12 @@ fn status_code() { assert!(!StatusCode::BadDecodingError.is_uncertain()); assert!(!StatusCode::BadDecodingError.is_good()); - assert_eq!((StatusCode::BadDecodingError | StatusCode::HISTORICAL_CALCULATED).status(), StatusCode::BadDecodingError); - assert_eq!((StatusCode::BadDecodingError | StatusCode::HISTORICAL_CALCULATED).bitflags(), StatusCode::HISTORICAL_CALCULATED); -} \ No newline at end of file + assert_eq!( + (StatusCode::BadDecodingError | StatusCode::HISTORICAL_CALCULATED).status(), + StatusCode::BadDecodingError + ); + assert_eq!( + (StatusCode::BadDecodingError | StatusCode::HISTORICAL_CALCULATED).bitflags(), + StatusCode::HISTORICAL_CALCULATED + ); +} diff --git a/types/src/string.rs b/types/src/string.rs index 592262004..3a1326810 100644 --- a/types/src/string.rs +++ b/types/src/string.rs @@ -5,11 +5,15 @@ //! Contains the implementation of `UAString`. use std::{ - fmt, io::{Read, Write}, + fmt, + io::{Read, Write}, }; use crate::{ - encoding::{BinaryEncoder, DecodingLimits, EncodingResult, process_decode_io_result, process_encode_io_result, write_i32}, + encoding::{ + process_decode_io_result, process_encode_io_result, write_i32, BinaryEncoder, + DecodingLimits, EncodingResult, + }, status_codes::StatusCode, }; @@ -37,7 +41,11 @@ impl fmt::Display for UAString { impl BinaryEncoder for UAString { fn byte_len(&self) -> usize { // Length plus the actual string length in bytes for a non-null string. 
- 4 + if self.value.is_none() { 0 } else { self.value.as_ref().unwrap().len() } + 4 + if self.value.is_none() { + 0 + } else { + self.value.as_ref().unwrap().len() + } } fn encode(&self, stream: &mut S) -> EncodingResult { @@ -64,17 +72,19 @@ impl BinaryEncoder for UAString { error!("String buf length is a negative number {}", len); Err(StatusCode::BadDecodingError) } else if len as usize > decoding_limits.max_string_length { - error!("String buf length {} exceeds decoding limit {}", len, decoding_limits.max_string_length); + error!( + "String buf length {} exceeds decoding limit {}", + len, decoding_limits.max_string_length + ); Err(StatusCode::BadDecodingError) } else { // Create a buffer filled with zeroes and read the string over the top let mut buf = vec![0u8; len as usize]; process_decode_io_result(stream.read_exact(&mut buf))?; - let value = String::from_utf8(buf) - .map_err(|err| { - trace!("Decoded string was not valid UTF-8 - {}", err.to_string()); - StatusCode::BadDecodingError - })?; + let value = String::from_utf8(buf).map_err(|err| { + trace!("Decoded string was not valid UTF-8 - {}", err.to_string()); + StatusCode::BadDecodingError + })?; Ok(UAString::from(value)) } } @@ -88,7 +98,11 @@ impl From for String { impl AsRef for UAString { fn as_ref(&self) -> &str { - if self.is_null() { "" } else { self.value.as_ref().unwrap() } + if self.is_null() { + "" + } else { + self.value.as_ref().unwrap() + } } } @@ -100,7 +114,9 @@ impl<'a> From<&'a str> for UAString { impl From<&String> for UAString { fn from(value: &String) -> Self { - UAString { value: Some(value.clone()) } + UAString { + value: Some(value.clone()), + } } } @@ -120,7 +136,7 @@ impl<'a, 'b> PartialEq for UAString { fn eq(&self, other: &str) -> bool { match self.value { None => false, - Some(ref v) => v.eq(other) + Some(ref v) => v.eq(other), } } } @@ -136,12 +152,20 @@ impl UAString { /// Returns true if the string is null or empty, false otherwise pub fn is_empty(&self) -> bool { - if self.value.is_none() { true } else { self.value.as_ref().unwrap().is_empty() } + if self.value.is_none() { + true + } else { + self.value.as_ref().unwrap().is_empty() + } } /// Returns the length of the string in bytes or -1 for null. pub fn len(&self) -> isize { - if self.value.is_none() { -1 } else { self.value.as_ref().unwrap().len() as isize } + if self.value.is_none() { + -1 + } else { + self.value.as_ref().unwrap().len() as isize + } } /// Create a null string (not the same as an empty string). @@ -233,4 +257,4 @@ fn string_substring() { } /// An XML element. 
-pub type XmlElement = UAString; \ No newline at end of file +pub type XmlElement = UAString; diff --git a/types/src/tests/date_time.rs b/types/src/tests/date_time.rs index eecf8ea82..58c8ca98b 100644 --- a/types/src/tests/date_time.rs +++ b/types/src/tests/date_time.rs @@ -56,4 +56,4 @@ fn string() { // Note: This parsing is potentially lossy so now != now2 and will be off by a small amount // so this code may have to change to compare an interval delta assert_eq!(now, now2); -} \ No newline at end of file +} diff --git a/types/src/tests/encoding.rs b/types/src/tests/encoding.rs index 329385571..0898357c5 100644 --- a/types/src/tests/encoding.rs +++ b/types/src/tests/encoding.rs @@ -1,13 +1,6 @@ -use std::{ - io::Cursor, - str::FromStr, -}; +use std::{io::Cursor, str::FromStr}; -use crate::{ - encoding::DecodingLimits, - string::UAString, - tests::*, -}; +use crate::{encoding::DecodingLimits, string::UAString, tests::*}; #[test] fn encoding_bool() { @@ -50,7 +43,6 @@ fn encoding_int32() { serialize_test(32004440 as i32); } - #[test] fn encoding_uint32() { serialize_test(0 as u32); @@ -65,7 +57,6 @@ fn encoding_int64() { serialize_test(32022204440 as i64); } - #[test] fn encoding_uint64() { serialize_test(0 as u64); @@ -112,7 +103,10 @@ fn decode_string_malformed_utf8() { let bytes = [0x06, 0x00, 0x00, 0xE6, 0xB0, 0xB4, 0x42, 0x6F, 0x79]; let mut stream = Cursor::new(bytes); let decoding_limits = DecodingLimits::default(); - assert_eq!(UAString::decode(&mut stream, &decoding_limits).unwrap_err(), StatusCode::BadDecodingError); + assert_eq!( + UAString::decode(&mut stream, &decoding_limits).unwrap_err(), + StatusCode::BadDecodingError + ); } #[test] @@ -138,16 +132,25 @@ fn encoding_datetime() { #[test] fn encoding_guid() { let guid = Guid::from_str("F0001234-FACE-BEEF-0102-030405060708").unwrap(); - assert_eq!("f0001234-face-beef-0102-030405060708", format!("{:?}", guid)); + assert_eq!( + "f0001234-face-beef-0102-030405060708", + format!("{:?}", guid) + ); let new_guid = serialize_test_and_return(guid.clone()); - assert_eq!("f0001234-face-beef-0102-030405060708", format!("{:?}", new_guid)); + assert_eq!( + "f0001234-face-beef-0102-030405060708", + format!("{:?}", new_guid) + ); serialize_test(guid); } #[test] fn encode_guid_5226() { // Sample from OPCUA Part 6 - 5.2.2.6 - let expected_bytes = [0x91, 0x2B, 0x96, 0x72, 0x75, 0xFA, 0xE6, 0x4A, 0x8D, 0x28, 0xB4, 0x04, 0xDC, 0x7D, 0xAF, 0x63]; + let expected_bytes = [ + 0x91, 0x2B, 0x96, 0x72, 0x75, 0xFA, 0xE6, 0x4A, 0x8D, 0x28, 0xB4, 0x04, 0xDC, 0x7D, 0xAF, + 0x63, + ]; let guid = Guid::from_str("912b9672-75fa-e64a-8D28-B404DC7DAF63").unwrap(); serialize_and_compare(guid, &expected_bytes); } @@ -204,7 +207,9 @@ fn node_id_string_part_6_5229() { let node_id = NodeId::new(1, "Hotæ°´"); assert!(node_id.is_string()); // NOTE: Example is wrong in 1.0.3, says 'r' instead of 'H' - let expected_bytes = [0x03, 0x1, 0x0, 0x6, 0x0, 0x0, 0x0, 0x48, 0x6F, 0x74, 0xE6, 0xB0, 0xB4]; + let expected_bytes = [ + 0x03, 0x1, 0x0, 0x6, 0x0, 0x0, 0x0, 0x48, 0x6F, 0x74, 0xE6, 0xB0, 0xB4, + ]; serialize_and_compare(node_id.clone(), &expected_bytes); serialize_test(node_id); @@ -303,7 +308,10 @@ fn qualified_name() { #[test] fn variant() { use std::mem; - println!("Size of a variant in bytes is {}", mem::size_of::()); + println!( + "Size of a variant in bytes is {}", + mem::size_of::() + ); // Boolean let v = Variant::Boolean(true); @@ -392,14 +400,25 @@ fn variant() { #[test] fn variant_single_dimension_array() { - let values = vec![Variant::Int32(100), 
Variant::Int32(200), Variant::Int32(300)]; + let values = vec![ + Variant::Int32(100), + Variant::Int32(200), + Variant::Int32(300), + ]; let v = Variant::from(values); serialize_test(v); } #[test] fn variant_multi_dimension_array() { - let values = vec![Variant::Int32(100), Variant::Int32(200), Variant::Int32(300), Variant::Int32(400), Variant::Int32(500), Variant::Int32(600)]; + let values = vec![ + Variant::Int32(100), + Variant::Int32(200), + Variant::Int32(300), + Variant::Int32(400), + Variant::Int32(500), + Variant::Int32(600), + ]; let dimensions = vec![3u32, 2u32]; let v = Variant::from((values, dimensions)); serialize_test(v); diff --git a/types/src/tests/mod.rs b/types/src/tests/mod.rs index 6ccff32b5..70753bf71 100644 --- a/types/src/tests/mod.rs +++ b/types/src/tests/mod.rs @@ -1,25 +1,27 @@ -mod encoding; mod date_time; +mod encoding; mod node_id; mod serde; mod variant; -use std::fmt::Debug; use std::cmp::PartialEq; +use std::fmt::Debug; use std::io::Cursor; -use crate::*; use crate::argument::Argument; use crate::status_codes::StatusCode; +use crate::*; pub fn serialize_test_and_return(value: T) -> T - where T: BinaryEncoder + Debug + PartialEq + Clone +where + T: BinaryEncoder + Debug + PartialEq + Clone, { serialize_test_and_return_expected(value.clone(), value) } pub fn serialize_test_and_return_expected(value: T, expected_value: T) -> T - where T: BinaryEncoder + Debug + PartialEq +where + T: BinaryEncoder + Debug + PartialEq, { // Ask the struct for its byte length let byte_len = value.byte_len(); @@ -50,20 +52,22 @@ pub fn serialize_test_and_return_expected(value: T, expected_value: T) -> T } pub fn serialize_test(value: T) - where T: BinaryEncoder + Debug + PartialEq + Clone +where + T: BinaryEncoder + Debug + PartialEq + Clone, { let _ = serialize_test_and_return(value); } pub fn serialize_test_expected(value: T, expected_value: T) - where T: BinaryEncoder + Debug + PartialEq +where + T: BinaryEncoder + Debug + PartialEq, { let _ = serialize_test_and_return_expected(value, expected_value); } - pub fn serialize_and_compare(value: T, expected: &[u8]) - where T: BinaryEncoder + Debug + PartialEq +where + T: BinaryEncoder + Debug + PartialEq, { // Ask the struct for its byte length let byte_len = value.byte_len(); diff --git a/types/src/tests/node_id.rs b/types/src/tests/node_id.rs index d034e10be..924580fe0 100644 --- a/types/src/tests/node_id.rs +++ b/types/src/tests/node_id.rs @@ -31,12 +31,18 @@ fn parse_node_id_string() { // String let node_id = NodeId::from_str("ns=1;s=Hello World").unwrap(); assert_eq!(node_id.namespace, 1); - assert_eq!(node_id.identifier, Identifier::String(UAString::from("Hello World"))); + assert_eq!( + node_id.identifier, + Identifier::String(UAString::from("Hello World")) + ); assert_eq!(format!("{}", node_id), "ns=1;s=Hello World"); let node_id = NodeId::from_str("s=No NS this time").unwrap(); assert_eq!(node_id.namespace, 0); - assert_eq!(node_id.identifier, Identifier::String(UAString::from("No NS this time"))); + assert_eq!( + node_id.identifier, + Identifier::String(UAString::from("No NS this time")) + ); assert_eq!(format!("{}", node_id), "s=No NS this time"); } @@ -45,9 +51,15 @@ fn parse_node_id_guid() { // Guid (note the mixed case) let node_id = NodeId::from_str("g=72962B91-FA75-4ae6-8D28-B404DC7DAF63").unwrap(); assert_eq!(node_id.namespace, 0); - assert_eq!(node_id.identifier, Identifier::Guid(Guid::from_str("72962B91-FA75-4ae6-8D28-B404DC7DAF63").unwrap())); + assert_eq!( + node_id.identifier, + 
Identifier::Guid(Guid::from_str("72962B91-FA75-4ae6-8D28-B404DC7DAF63").unwrap()) + ); // All lower case when returned - assert_eq!(format!("{}", node_id), "g=72962b91-fa75-4ae6-8d28-b404dc7daf63"); + assert_eq!( + format!("{}", node_id), + "g=72962b91-fa75-4ae6-8d28-b404dc7daf63" + ); } #[test] @@ -55,7 +67,10 @@ fn parse_node_id_byte_string() { // ByteString (sample bytes comes from OPC UA spec) let node_id = NodeId::from_str("ns=1;b=M/RbKBsRVkePCePcx24oRA==").unwrap(); assert_eq!(node_id.namespace, 1); - assert_eq!(node_id.identifier, Identifier::ByteString(ByteString::from_base64("M/RbKBsRVkePCePcx24oRA==").unwrap())); + assert_eq!( + node_id.identifier, + Identifier::ByteString(ByteString::from_base64("M/RbKBsRVkePCePcx24oRA==").unwrap()) + ); // Turn byte string back to string, compare to original assert_eq!(format!("{}", node_id), "ns=1;b=M/RbKBsRVkePCePcx24oRA=="); } @@ -88,7 +103,10 @@ fn expanded_node_id() { namespace_uri: UAString::from("http://foo;blah%"), // Contains escaped chars ; and % server_index: 33, // Note this should not display because the urn is present }; - assert_eq!(format!("{}", node_id), "svr=33;nsu=http://foo%3bblah%25;s=Hello World"); + assert_eq!( + format!("{}", node_id), + "svr=33;nsu=http://foo%3bblah%25;s=Hello World" + ); // Turn node into and out of a string, ensure equals itself let node_id = ExpandedNodeId { @@ -97,5 +115,8 @@ fn expanded_node_id() { server_index: 33, }; assert_eq!(format!("{}", node_id), "svr=33;ns=1;s=Hello World"); - assert_eq!(ExpandedNodeId::from_str("svr=33;ns=1;s=Hello World").unwrap(), node_id); -} \ No newline at end of file + assert_eq!( + ExpandedNodeId::from_str("svr=33;ns=1;s=Hello World").unwrap(), + node_id + ); +} diff --git a/types/src/tests/serde.rs b/types/src/tests/serde.rs index bb3e526db..4853aa970 100644 --- a/types/src/tests/serde.rs +++ b/types/src/tests/serde.rs @@ -1,10 +1,10 @@ use serde_json; use crate::data_value::DataValue; -use crate::variant::Variant; -use crate::guid::Guid; use crate::date_time::DateTime; +use crate::guid::Guid; use crate::status_codes::StatusCode; +use crate::variant::Variant; #[test] fn serialize_variant() { diff --git a/types/src/tests/variant.rs b/types/src/tests/variant.rs index 83bc45c55..2b4f61e09 100644 --- a/types/src/tests/variant.rs +++ b/types/src/tests/variant.rs @@ -2,11 +2,11 @@ use std::convert::TryFrom; use std::str::FromStr; use crate::{ - ByteString, DateTime, ExpandedNodeId, Guid, LocalizedText, NodeId, QualifiedName, UAString, - DataTypeId, numeric_range::NumericRange, status_code::StatusCode, variant::{Variant, VariantTypeId}, + ByteString, DataTypeId, DateTime, ExpandedNodeId, Guid, LocalizedText, NodeId, QualifiedName, + UAString, }; #[test] @@ -38,8 +38,11 @@ fn size() { #[test] fn variant_type_id() { - use crate::{UAString, DateTime, ByteString, XmlElement, NodeId, ExpandedNodeId, QualifiedName, LocalizedText, ExtensionObject, Guid}; use crate::status_codes::StatusCode; + use crate::{ + ByteString, DateTime, ExpandedNodeId, ExtensionObject, Guid, LocalizedText, NodeId, + QualifiedName, UAString, XmlElement, + }; let types = [ (Variant::Empty, VariantTypeId::Empty), @@ -56,15 +59,30 @@ fn variant_type_id() { (Variant::from(0f64), VariantTypeId::Double), (Variant::from(UAString::null()), VariantTypeId::String), (Variant::from(ByteString::null()), VariantTypeId::ByteString), - (Variant::XmlElement(XmlElement::null()), VariantTypeId::XmlElement), + ( + Variant::XmlElement(XmlElement::null()), + VariantTypeId::XmlElement, + ), (Variant::from(StatusCode::Good), 
VariantTypeId::StatusCode), (Variant::from(DateTime::now()), VariantTypeId::DateTime), (Variant::from(Guid::new()), VariantTypeId::Guid), (Variant::from(NodeId::null()), VariantTypeId::NodeId), - (Variant::from(ExpandedNodeId::null()), VariantTypeId::ExpandedNodeId), - (Variant::from(QualifiedName::null()), VariantTypeId::QualifiedName), - (Variant::from(LocalizedText::null()), VariantTypeId::LocalizedText), - (Variant::from(ExtensionObject::null()), VariantTypeId::ExtensionObject), + ( + Variant::from(ExpandedNodeId::null()), + VariantTypeId::ExpandedNodeId, + ), + ( + Variant::from(QualifiedName::null()), + VariantTypeId::QualifiedName, + ), + ( + Variant::from(LocalizedText::null()), + VariantTypeId::LocalizedText, + ), + ( + Variant::from(ExtensionObject::null()), + VariantTypeId::ExtensionObject, + ), (Variant::from(vec![1]), VariantTypeId::Array), ]; for t in &types { @@ -91,12 +109,12 @@ fn variant_u32_array() { Variant::UInt32(v) => { assert_eq!(v, i); } - _ => panic!("Not the expected type") + _ => panic!("Not the expected type"), } i += 1; } } - _ => panic!("Not an array") + _ => panic!("Not an array"), } } @@ -131,12 +149,12 @@ fn variant_i32_array() { Variant::Int32(v) => { assert_eq!(v, i); } - _ => panic!("Not the expected type") + _ => panic!("Not the expected type"), } i += 1; } } - _ => panic!("Not an array") + _ => panic!("Not an array"), } } @@ -166,7 +184,10 @@ fn variant_multi_dimensional_array() { assert!(v.is_array_of_type(VariantTypeId::Int32)); assert!(v.is_valid()); - let v = Variant::from((vec![Variant::from(10), Variant::from(10)], vec![1u32, 2u32, 3u32])); + let v = Variant::from(( + vec![Variant::from(10), Variant::from(10)], + vec![1u32, 2u32, 3u32], + )); assert!(v.is_array()); assert!(v.is_array_of_type(VariantTypeId::Int32)); assert!(!v.is_valid()); @@ -187,7 +208,7 @@ fn index_of_array() { assert_eq!(array.values.len(), 1); assert_eq!(array.values[0], Variant::Int32(2)); } - _ => panic!() + _ => panic!(), } let r = v.range_of(NumericRange::Range(1, 2)).unwrap(); @@ -197,7 +218,7 @@ fn index_of_array() { assert_eq!(array.values[0], Variant::Int32(2)); assert_eq!(array.values[1], Variant::Int32(3)); } - _ => panic!() + _ => panic!(), } let r = v.range_of(NumericRange::Range(1, 200)).unwrap(); @@ -205,7 +226,7 @@ fn index_of_array() { Variant::Array(array) => { assert_eq!(array.values.len(), 2); } - _ => panic!() + _ => panic!(), } let r = v.range_of(NumericRange::Range(3, 200)).unwrap_err(); @@ -231,7 +252,9 @@ fn index_of_string() { } fn ensure_conversion_fails(v: &Variant, convert_to: &[VariantTypeId]) { - convert_to.iter().for_each(|vt| assert_eq!(v.convert(*vt), Variant::Empty)); + convert_to + .iter() + .for_each(|vt| assert_eq!(v.convert(*vt), Variant::Empty)); } #[test] @@ -250,19 +273,34 @@ fn variant_convert_bool() { assert_eq!(v.convert(VariantTypeId::Int64), Variant::Int64(1)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(1)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::ByteString, VariantTypeId::String, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::NodeId, - VariantTypeId::StatusCode, VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, - VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::ByteString, + VariantTypeId::String, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::NodeId, + VariantTypeId::StatusCode, + VariantTypeId::LocalizedText, + 
VariantTypeId::QualifiedName, + VariantTypeId::XmlElement, + ], + ); } #[test] fn variant_cast_bool() { // String - assert_eq!(Variant::from(false).cast(VariantTypeId::String), Variant::from("false")); - assert_eq!(Variant::from(true).cast(VariantTypeId::String), Variant::from("true")); + assert_eq!( + Variant::from(false).cast(VariantTypeId::String), + Variant::from("false") + ); + assert_eq!( + Variant::from(true).cast(VariantTypeId::String), + Variant::from("true") + ); } #[test] @@ -280,20 +318,36 @@ fn variant_convert_byte() { assert_eq!(v.convert(VariantTypeId::UInt32), Variant::UInt32(5)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(5)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::String, VariantTypeId::ByteString, - VariantTypeId::DateTime, VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, - VariantTypeId::NodeId, VariantTypeId::StatusCode, VariantTypeId::LocalizedText, - VariantTypeId::QualifiedName, VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::String, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::NodeId, + VariantTypeId::StatusCode, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::XmlElement, + ], + ); } #[test] fn variant_cast_byte() { let v: Variant = 5u8.into(); // Boolean - assert_eq!(Variant::from(11u8).cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1u8).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(11u8).cast(VariantTypeId::Boolean), + Variant::Empty + ); + assert_eq!( + Variant::from(1u8).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("5")); } @@ -303,22 +357,45 @@ fn variant_convert_double() { let v: Variant = 12.5f64.into(); assert_eq!(v.convert(v.type_id()), v); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, - VariantTypeId::DateTime, VariantTypeId::ExpandedNodeId, VariantTypeId::Float, - VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::Int32, VariantTypeId::Int64, - VariantTypeId::NodeId, VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, - VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, VariantTypeId::UInt16, - VariantTypeId::UInt32, VariantTypeId::UInt64, VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] fn variant_cast_double() { let v: Variant = 12.5f64.into(); // Cast Boolean - assert_eq!(Variant::from(11f64).cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1f64).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(11f64).cast(VariantTypeId::Boolean), + Variant::Empty + ); + assert_eq!( + Variant::from(1f64).cast(VariantTypeId::Boolean), + 
Variant::from(true) + ); // Cast Byte, Float, Int16, Int32, Int64, SByte, UInt16, UInt32, UInt64 assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(13u8)); assert_eq!(v.cast(VariantTypeId::Float), Variant::from(12.5f32)); @@ -339,22 +416,44 @@ fn variant_convert_float() { // All these are implicit conversions expected to succeed assert_eq!(v.convert(VariantTypeId::Double), Variant::Double(12.5)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, - VariantTypeId::DateTime, VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, - VariantTypeId::Int16, VariantTypeId::Int32, VariantTypeId::Int64, VariantTypeId::NodeId, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, VariantTypeId::LocalizedText, - VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, VariantTypeId::UInt64, - VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] fn variant_cast_float() { let v: Variant = 12.5f32.into(); // Boolean - assert_eq!(Variant::from(11f32).cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1f32).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(11f32).cast(VariantTypeId::Boolean), + Variant::Empty + ); + assert_eq!( + Variant::from(1f32).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Cast assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(13u8)); assert_eq!(v.cast(VariantTypeId::Int16), Variant::from(13i16)); @@ -379,12 +478,25 @@ fn variant_convert_int16() { assert_eq!(v.convert(VariantTypeId::UInt32), Variant::UInt32(8)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(8)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::SByte, VariantTypeId::NodeId, - VariantTypeId::StatusCode, VariantTypeId::String, VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, - VariantTypeId::UInt16, VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::SByte, + VariantTypeId::NodeId, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -392,11 +504,20 @@ fn variant_cast_int16() { let v: Variant = 8i16.into(); // Cast Boolean, Byte, SByte, String, UInt16 assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1i16).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1i16).cast(VariantTypeId::Boolean), + Variant::from(true) + ); assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(8u8)); - 
assert_eq!(Variant::from(-120i16).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(-120i16).cast(VariantTypeId::Byte), + Variant::Empty + ); assert_eq!(v.cast(VariantTypeId::SByte), Variant::from(8i8)); - assert_eq!(Variant::from(-137i16).cast(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from(-137i16).cast(VariantTypeId::SByte), + Variant::Empty + ); assert_eq!(v.cast(VariantTypeId::String), Variant::from("8")); assert_eq!(v.cast(VariantTypeId::UInt16), Variant::from(8u16)); } @@ -411,12 +532,27 @@ fn variant_convert_int32() { assert_eq!(v.convert(VariantTypeId::Int64), Variant::Int64(9)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(9)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::NodeId, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, VariantTypeId::LocalizedText, - VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -424,26 +560,46 @@ fn variant_cast_int32() { let v: Variant = 9i32.into(); // Boolean assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1i32).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1i32).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Byte assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(9u8)); - assert_eq!(Variant::from(-120i32).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(-120i32).cast(VariantTypeId::Byte), + Variant::Empty + ); // Int16 assert_eq!(v.cast(VariantTypeId::Int16), Variant::from(9i16)); // SByte assert_eq!(v.cast(VariantTypeId::SByte), Variant::from(9i8)); - assert_eq!(Variant::from(-137i32).cast(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from(-137i32).cast(VariantTypeId::SByte), + Variant::Empty + ); // StatusCode - let status_code = StatusCode::BadResourceUnavailable | StatusCode::HISTORICAL_RAW | StatusCode::SEMANTICS_CHANGED; - assert_eq!(Variant::from(status_code.bits() as i32).cast(VariantTypeId::StatusCode), Variant::from(status_code)); + let status_code = StatusCode::BadResourceUnavailable + | StatusCode::HISTORICAL_RAW + | StatusCode::SEMANTICS_CHANGED; + assert_eq!( + Variant::from(status_code.bits() as i32).cast(VariantTypeId::StatusCode), + Variant::from(status_code) + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("9")); // UInt16 assert_eq!(v.cast(VariantTypeId::UInt16), Variant::from(9u16)); - assert_eq!(Variant::from(-120i32).cast(VariantTypeId::UInt16), Variant::Empty); + assert_eq!( + Variant::from(-120i32).cast(VariantTypeId::UInt16), + Variant::Empty + ); // UInt32 assert_eq!(v.cast(VariantTypeId::UInt32), Variant::from(9u32)); - assert_eq!(Variant::from(-120i32).cast(VariantTypeId::UInt32), Variant::Empty); + assert_eq!( + 
Variant::from(-120i32).cast(VariantTypeId::UInt32), + Variant::Empty + ); } #[test] @@ -454,13 +610,29 @@ fn variant_convert_int64() { assert_eq!(v.convert(VariantTypeId::Double), Variant::Double(10.0)); assert_eq!(v.convert(VariantTypeId::Float), Variant::Float(10.0)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::Int32, - VariantTypeId::NodeId, VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, - VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -468,29 +640,52 @@ fn variant_cast_int64() { let v: Variant = 10i64.into(); // Boolean assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1i64).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1i64).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Byte assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(10u8)); - assert_eq!(Variant::from(-120i64).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(-120i64).cast(VariantTypeId::Byte), + Variant::Empty + ); // Int16 assert_eq!(v.cast(VariantTypeId::Int16), Variant::from(10i16)); // SByte assert_eq!(v.cast(VariantTypeId::SByte), Variant::from(10i8)); - assert_eq!(Variant::from(-137i64).cast(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from(-137i64).cast(VariantTypeId::SByte), + Variant::Empty + ); // StatusCode - let status_code = StatusCode::BadResourceUnavailable | StatusCode::HISTORICAL_RAW | StatusCode::SEMANTICS_CHANGED; - assert_eq!(Variant::from(status_code.bits() as i64).cast(VariantTypeId::StatusCode), Variant::from(status_code)); + let status_code = StatusCode::BadResourceUnavailable + | StatusCode::HISTORICAL_RAW + | StatusCode::SEMANTICS_CHANGED; + assert_eq!( + Variant::from(status_code.bits() as i64).cast(VariantTypeId::StatusCode), + Variant::from(status_code) + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("10")); // UInt16 assert_eq!(v.cast(VariantTypeId::UInt16), Variant::from(10u16)); - assert_eq!(Variant::from(-120i64).cast(VariantTypeId::UInt16), Variant::Empty); + assert_eq!( + Variant::from(-120i64).cast(VariantTypeId::UInt16), + Variant::Empty + ); // UInt32 assert_eq!(v.cast(VariantTypeId::UInt32), Variant::from(10u32)); - assert_eq!(Variant::from(-120i64).cast(VariantTypeId::UInt32), Variant::Empty); + assert_eq!( + Variant::from(-120i64).cast(VariantTypeId::UInt32), + Variant::Empty + ); // UInt64 assert_eq!(v.cast(VariantTypeId::UInt64), Variant::from(10u64)); - assert_eq!(Variant::from(-120i64).cast(VariantTypeId::UInt32), Variant::Empty); + assert_eq!( + Variant::from(-120i64).cast(VariantTypeId::UInt32), + Variant::Empty + ); } #[test] @@ -507,12 +702,23 @@ fn 
variant_convert_sbyte() { assert_eq!(v.convert(VariantTypeId::UInt32), Variant::UInt32(12)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(12)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::NodeId, VariantTypeId::StatusCode, - VariantTypeId::String, VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, - VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::NodeId, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -520,10 +726,16 @@ fn variant_cast_sbyte() { let v: Variant = 12i8.into(); // Boolean assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1i8).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1i8).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Byte assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(12u8)); - assert_eq!(Variant::from(-120i8).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(-120i8).cast(VariantTypeId::Byte), + Variant::Empty + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("12")); } @@ -533,47 +745,129 @@ fn variant_convert_string() { let v = Variant::from("Reflexive Test"); assert_eq!(v.convert(v.type_id()), v); // Boolean - assert_eq!(Variant::from("1").convert(VariantTypeId::Boolean), true.into()); - assert_eq!(Variant::from("0").convert(VariantTypeId::Boolean), false.into()); - assert_eq!(Variant::from("true").convert(VariantTypeId::Boolean), true.into()); - assert_eq!(Variant::from("false").convert(VariantTypeId::Boolean), false.into()); - assert_eq!(Variant::from(" false").convert(VariantTypeId::Boolean), Variant::Empty); + assert_eq!( + Variant::from("1").convert(VariantTypeId::Boolean), + true.into() + ); + assert_eq!( + Variant::from("0").convert(VariantTypeId::Boolean), + false.into() + ); + assert_eq!( + Variant::from("true").convert(VariantTypeId::Boolean), + true.into() + ); + assert_eq!( + Variant::from("false").convert(VariantTypeId::Boolean), + false.into() + ); + assert_eq!( + Variant::from(" false").convert(VariantTypeId::Boolean), + Variant::Empty + ); // Byte - assert_eq!(Variant::from("12").convert(VariantTypeId::Byte), 12u8.into()); - assert_eq!(Variant::from("256").convert(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::Byte), + 12u8.into() + ); + assert_eq!( + Variant::from("256").convert(VariantTypeId::Byte), + Variant::Empty + ); // Double - assert_eq!(Variant::from("12.5").convert(VariantTypeId::Double), 12.5f64.into()); + assert_eq!( + Variant::from("12.5").convert(VariantTypeId::Double), + 12.5f64.into() + ); // Float - assert_eq!(Variant::from("12.5").convert(VariantTypeId::Float), 12.5f32.into()); + assert_eq!( + Variant::from("12.5").convert(VariantTypeId::Float), + 12.5f32.into() + ); // Guid - assert_eq!(Variant::from("d47a32c9-5ee7-43c1-a733-0fe30bf26b50").convert(VariantTypeId::Guid), Guid::from_str("d47a32c9-5ee7-43c1-a733-0fe30bf26b50").unwrap().into()); + assert_eq!( + Variant::from("d47a32c9-5ee7-43c1-a733-0fe30bf26b50").convert(VariantTypeId::Guid), 
+ Guid::from_str("d47a32c9-5ee7-43c1-a733-0fe30bf26b50") + .unwrap() + .into() + ); // Int16 - assert_eq!(Variant::from("12").convert(VariantTypeId::Int16), 12i16.into()); - assert_eq!(Variant::from("65536").convert(VariantTypeId::Int16), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::Int16), + 12i16.into() + ); + assert_eq!( + Variant::from("65536").convert(VariantTypeId::Int16), + Variant::Empty + ); // Int32 - assert_eq!(Variant::from("12").convert(VariantTypeId::Int32), 12i32.into()); - assert_eq!(Variant::from("2147483648").convert(VariantTypeId::Int32), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::Int32), + 12i32.into() + ); + assert_eq!( + Variant::from("2147483648").convert(VariantTypeId::Int32), + Variant::Empty + ); // Int64 - assert_eq!(Variant::from("12").convert(VariantTypeId::Int64), 12i64.into()); - assert_eq!(Variant::from("9223372036854775808").convert(VariantTypeId::Int64), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::Int64), + 12i64.into() + ); + assert_eq!( + Variant::from("9223372036854775808").convert(VariantTypeId::Int64), + Variant::Empty + ); // SByte - assert_eq!(Variant::from("12").convert(VariantTypeId::SByte), 12i8.into()); - assert_eq!(Variant::from("128").convert(VariantTypeId::SByte), Variant::Empty); - assert_eq!(Variant::from("-129").convert(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::SByte), + 12i8.into() + ); + assert_eq!( + Variant::from("128").convert(VariantTypeId::SByte), + Variant::Empty + ); + assert_eq!( + Variant::from("-129").convert(VariantTypeId::SByte), + Variant::Empty + ); // UInt16 - assert_eq!(Variant::from("12").convert(VariantTypeId::UInt16), 12u16.into()); - assert_eq!(Variant::from("65536").convert(VariantTypeId::UInt16), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::UInt16), + 12u16.into() + ); + assert_eq!( + Variant::from("65536").convert(VariantTypeId::UInt16), + Variant::Empty + ); // UInt32 - assert_eq!(Variant::from("12").convert(VariantTypeId::UInt32), 12u32.into()); - assert_eq!(Variant::from("4294967296").convert(VariantTypeId::UInt32), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::UInt32), + 12u32.into() + ); + assert_eq!( + Variant::from("4294967296").convert(VariantTypeId::UInt32), + Variant::Empty + ); // UInt64 - assert_eq!(Variant::from("12").convert(VariantTypeId::UInt64), 12u64.into()); - assert_eq!(Variant::from("18446744073709551615").convert(VariantTypeId::UInt32), Variant::Empty); + assert_eq!( + Variant::from("12").convert(VariantTypeId::UInt64), + 12u64.into() + ); + assert_eq!( + Variant::from("18446744073709551615").convert(VariantTypeId::UInt32), + Variant::Empty + ); // Impermissible let v = Variant::from("xxx"); - ensure_conversion_fails(&v, &[ - VariantTypeId::ByteString, VariantTypeId::StatusCode, VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::ByteString, + VariantTypeId::StatusCode, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -584,17 +878,30 @@ fn variant_cast_string() { let now_v: Variant = now.into(); assert_eq!(Variant::from(now_s).cast(VariantTypeId::DateTime), now_v); // ExpandedNodeId - assert_eq!(Variant::from("svr=5;ns=22;s=Hello World").cast(VariantTypeId::ExpandedNodeId), ExpandedNodeId { - node_id: NodeId::new(22, "Hello World"), - namespace_uri: UAString::null(), - server_index: 5, - }.into()); + assert_eq!( + 
Variant::from("svr=5;ns=22;s=Hello World").cast(VariantTypeId::ExpandedNodeId), + ExpandedNodeId { + node_id: NodeId::new(22, "Hello World"), + namespace_uri: UAString::null(), + server_index: 5, + } + .into() + ); // NodeId - assert_eq!(Variant::from("ns=22;s=Hello World").cast(VariantTypeId::NodeId), NodeId::new(22, "Hello World").into()); + assert_eq!( + Variant::from("ns=22;s=Hello World").cast(VariantTypeId::NodeId), + NodeId::new(22, "Hello World").into() + ); // LocalizedText - assert_eq!(Variant::from("Localized Text").cast(VariantTypeId::LocalizedText), LocalizedText::new("", "Localized Text").into()); + assert_eq!( + Variant::from("Localized Text").cast(VariantTypeId::LocalizedText), + LocalizedText::new("", "Localized Text").into() + ); // QualifiedName - assert_eq!(Variant::from("Qualified Name").cast(VariantTypeId::QualifiedName), QualifiedName::new(0, "Qualified Name").into()); + assert_eq!( + Variant::from("Qualified Name").cast(VariantTypeId::QualifiedName), + QualifiedName::new(0, "Qualified Name").into() + ); } #[test] @@ -607,16 +914,30 @@ fn variant_convert_uint16() { assert_eq!(v.convert(VariantTypeId::Int16), Variant::Int16(80)); assert_eq!(v.convert(VariantTypeId::Int32), Variant::Int32(80)); assert_eq!(v.convert(VariantTypeId::Int64), Variant::Int64(80)); - assert_eq!(v.convert(VariantTypeId::StatusCode), Variant::StatusCode(StatusCode::from_bits_truncate(80 << 16))); + assert_eq!( + v.convert(VariantTypeId::StatusCode), + Variant::StatusCode(StatusCode::from_bits_truncate(80 << 16)) + ); assert_eq!(v.convert(VariantTypeId::UInt32), Variant::UInt32(80)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(80)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::SByte, VariantTypeId::String, - VariantTypeId::NodeId, VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, - VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::SByte, + VariantTypeId::String, + VariantTypeId::NodeId, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -624,13 +945,22 @@ fn variant_cast_uint16() { let v: Variant = 80u16.into(); // Boolean assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1u16).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1u16).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Byte assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(80u8)); - assert_eq!(Variant::from(256u16).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(256u16).cast(VariantTypeId::Byte), + Variant::Empty + ); // SByte assert_eq!(v.cast(VariantTypeId::SByte), Variant::from(80i8)); - assert_eq!(Variant::from(128u16).cast(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from(128u16).cast(VariantTypeId::SByte), + Variant::Empty + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("80")); } @@ -647,12 +977,26 @@ fn variant_convert_uint32() { assert_eq!(v.convert(VariantTypeId::UInt32), Variant::UInt32(23)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(23)); // Impermissible - ensure_conversion_fails(&v, &[ - 
VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::SByte, - VariantTypeId::StatusCode, VariantTypeId::String, VariantTypeId::NodeId, VariantTypeId::LocalizedText, - VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::XmlElement - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::NodeId, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -660,24 +1004,44 @@ fn variant_cast_uint32() { let v: Variant = 23u32.into(); // Boolean assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1u32).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1u32).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Byte assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(23u8)); - assert_eq!(Variant::from(256u32).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(256u32).cast(VariantTypeId::Byte), + Variant::Empty + ); // Int16 assert_eq!(v.cast(VariantTypeId::Int16), Variant::from(23i16)); - assert_eq!(Variant::from(102256u32).cast(VariantTypeId::Int16), Variant::Empty); + assert_eq!( + Variant::from(102256u32).cast(VariantTypeId::Int16), + Variant::Empty + ); // SByte assert_eq!(v.cast(VariantTypeId::SByte), Variant::from(23i8)); - assert_eq!(Variant::from(128u32).cast(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from(128u32).cast(VariantTypeId::SByte), + Variant::Empty + ); // StatusCode - let status_code = StatusCode::BadResourceUnavailable | StatusCode::HISTORICAL_RAW | StatusCode::SEMANTICS_CHANGED; - assert_eq!(Variant::from(status_code.bits() as u32).cast(VariantTypeId::StatusCode), Variant::from(status_code)); + let status_code = StatusCode::BadResourceUnavailable + | StatusCode::HISTORICAL_RAW + | StatusCode::SEMANTICS_CHANGED; + assert_eq!( + Variant::from(status_code.bits() as u32).cast(VariantTypeId::StatusCode), + Variant::from(status_code) + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("23")); // UInt16 assert_eq!(v.cast(VariantTypeId::UInt16), Variant::from(23u16)); - assert_eq!(Variant::from(102256u32).cast(VariantTypeId::UInt16), Variant::Empty); + assert_eq!( + Variant::from(102256u32).cast(VariantTypeId::UInt16), + Variant::Empty + ); } #[test] @@ -690,13 +1054,28 @@ fn variant_convert_uint64() { assert_eq!(v.convert(VariantTypeId::Int64), Variant::Int64(43)); assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(43)); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::ExpandedNodeId, VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::Int32, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, VariantTypeId::NodeId, - VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + 
VariantTypeId::ExpandedNodeId, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::NodeId, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -704,27 +1083,50 @@ fn variant_cast_uint64() { let v: Variant = 43u64.into(); // Boolean assert_eq!(v.cast(VariantTypeId::Boolean), Variant::Empty); - assert_eq!(Variant::from(1u64).cast(VariantTypeId::Boolean), Variant::from(true)); + assert_eq!( + Variant::from(1u64).cast(VariantTypeId::Boolean), + Variant::from(true) + ); // Byte assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(43u8)); - assert_eq!(Variant::from(256u64).cast(VariantTypeId::Byte), Variant::Empty); + assert_eq!( + Variant::from(256u64).cast(VariantTypeId::Byte), + Variant::Empty + ); // Int16 assert_eq!(v.cast(VariantTypeId::Int16), Variant::from(43i16)); - assert_eq!(Variant::from(102256u64).cast(VariantTypeId::Int16), Variant::Empty); + assert_eq!( + Variant::from(102256u64).cast(VariantTypeId::Int16), + Variant::Empty + ); // SByte assert_eq!(v.cast(VariantTypeId::SByte), Variant::from(43i8)); - assert_eq!(Variant::from(128u64).cast(VariantTypeId::SByte), Variant::Empty); + assert_eq!( + Variant::from(128u64).cast(VariantTypeId::SByte), + Variant::Empty + ); // StatusCode - let status_code = StatusCode::BadResourceUnavailable | StatusCode::HISTORICAL_RAW | StatusCode::SEMANTICS_CHANGED; - assert_eq!(Variant::from(status_code.bits() as u64).cast(VariantTypeId::StatusCode), Variant::from(status_code)); + let status_code = StatusCode::BadResourceUnavailable + | StatusCode::HISTORICAL_RAW + | StatusCode::SEMANTICS_CHANGED; + assert_eq!( + Variant::from(status_code.bits() as u64).cast(VariantTypeId::StatusCode), + Variant::from(status_code) + ); // String assert_eq!(v.cast(VariantTypeId::String), Variant::from("43")); // UInt16 assert_eq!(v.cast(VariantTypeId::UInt16), Variant::from(43u16)); - assert_eq!(Variant::from(102256u64).cast(VariantTypeId::UInt16), Variant::Empty); + assert_eq!( + Variant::from(102256u64).cast(VariantTypeId::UInt16), + Variant::Empty + ); // UInt32 assert_eq!(v.cast(VariantTypeId::UInt32), Variant::from(43u32)); - assert_eq!(Variant::from(4294967298u64).cast(VariantTypeId::UInt32), Variant::Empty); + assert_eq!( + Variant::from(4294967298u64).cast(VariantTypeId::UInt32), + Variant::Empty + ); } #[test] @@ -738,14 +1140,31 @@ fn variant_cast_date_time() { fn variant_convert_guid() { let v = Variant::from(Guid::new()); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::Double, VariantTypeId::ExpandedNodeId, VariantTypeId::Float, - VariantTypeId::Int16, VariantTypeId::Int32, VariantTypeId::Int64, VariantTypeId::NodeId, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, - VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Float, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + 
VariantTypeId::StatusCode, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -764,23 +1183,48 @@ fn variant_convert_status_code() { let v = Variant::from(StatusCode::BadInvalidArgument); assert_eq!(v.convert(v.type_id()), v); // Implicit Int32, Int64, UInt32, UInt64 - assert_eq!(v.convert(VariantTypeId::Int32), Variant::Int32(-2136276992i32)); // 0x80AB_0000 overflows to negative + assert_eq!( + v.convert(VariantTypeId::Int32), + Variant::Int32(-2136276992i32) + ); // 0x80AB_0000 overflows to negative assert_eq!(v.convert(VariantTypeId::Int64), Variant::Int64(0x80AB_0000)); - assert_eq!(v.convert(VariantTypeId::UInt32), Variant::UInt32(0x80AB_0000)); - assert_eq!(v.convert(VariantTypeId::UInt64), Variant::UInt64(0x80AB_0000)); + assert_eq!( + v.convert(VariantTypeId::UInt32), + Variant::UInt32(0x80AB_0000) + ); + assert_eq!( + v.convert(VariantTypeId::UInt64), + Variant::UInt64(0x80AB_0000) + ); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::Double, VariantTypeId::ExpandedNodeId, VariantTypeId::Float, VariantTypeId::Guid, - VariantTypeId::Int16, VariantTypeId::NodeId, VariantTypeId::SByte, VariantTypeId::String, - VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, VariantTypeId::UInt16, - VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::String, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::XmlElement, + ], + ); } #[test] fn variant_cast_status_code() { - let status_code = StatusCode::BadResourceUnavailable | StatusCode::HISTORICAL_RAW | StatusCode::SEMANTICS_CHANGED; + let status_code = StatusCode::BadResourceUnavailable + | StatusCode::HISTORICAL_RAW + | StatusCode::SEMANTICS_CHANGED; let v = Variant::from(status_code); // Cast UInt16 (BadResourceUnavailable == 0x8004_0000) assert_eq!(v.cast(VariantTypeId::UInt16), Variant::UInt16(0x8004)); @@ -791,14 +1235,31 @@ fn variant_convert_byte_string() { let v = Variant::from(ByteString::from(b"test")); assert_eq!(v.convert(v.type_id()), v); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::DateTime, VariantTypeId::Double, - VariantTypeId::ExpandedNodeId, VariantTypeId::Float, VariantTypeId::Guid, - VariantTypeId::Int16, VariantTypeId::Int32, VariantTypeId::Int64, VariantTypeId::NodeId, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::String, - VariantTypeId::LocalizedText, VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::String, + 
VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -814,17 +1275,36 @@ fn variant_convert_qualified_name() { let v = Variant::from(QualifiedName::new(123, "hello")); assert_eq!(v.convert(v.type_id()), v); // LocalizedText - assert_eq!(v.convert(VariantTypeId::LocalizedText), Variant::from(LocalizedText::new("", "hello"))); + assert_eq!( + v.convert(VariantTypeId::LocalizedText), + Variant::from(LocalizedText::new("", "hello")) + ); // String assert_eq!(v.convert(VariantTypeId::String), Variant::from("hello")); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::Double, VariantTypeId::ExpandedNodeId, VariantTypeId::Float, VariantTypeId::Guid, - VariantTypeId::Int16, VariantTypeId::Int32, VariantTypeId::Int64, VariantTypeId::NodeId, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -834,13 +1314,30 @@ fn variant_convert_localized_text() { // String assert_eq!(v.convert(VariantTypeId::String), Variant::from("bonjour")); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::Double, VariantTypeId::ExpandedNodeId, VariantTypeId::Float, VariantTypeId::Guid, - VariantTypeId::Int16, VariantTypeId::Int32, VariantTypeId::Int64, VariantTypeId::NodeId, - VariantTypeId::SByte, VariantTypeId::StatusCode, VariantTypeId::QualifiedName, - VariantTypeId::UInt16, VariantTypeId::UInt32, VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::ExpandedNodeId, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::StatusCode, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -848,21 +1345,42 @@ fn variant_convert_node_id() { let v = Variant::from(NodeId::new(99, "my node")); assert_eq!(v.convert(v.type_id()), v); // ExpandedNodeId - assert_eq!(v.convert(VariantTypeId::ExpandedNodeId), Variant::from(ExpandedNodeId { - node_id: NodeId::new(99, "my node"), - namespace_uri: UAString::null(), - server_index: 0, - })); + assert_eq!( + v.convert(VariantTypeId::ExpandedNodeId), + Variant::from(ExpandedNodeId { + node_id: NodeId::new(99, "my node"), + namespace_uri: UAString::null(), + server_index: 0, + }) + ); // String - assert_eq!(v.convert(VariantTypeId::String), Variant::from("ns=99;s=my node")); + assert_eq!( + 
v.convert(VariantTypeId::String), + Variant::from("ns=99;s=my node") + ); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::Double, VariantTypeId::Float, VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::Int32, - VariantTypeId::Int64, VariantTypeId::SByte, VariantTypeId::LocalizedText, - VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::SByte, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -874,15 +1392,34 @@ fn variant_convert_expanded_node_id() { }); assert_eq!(v.convert(v.type_id()), v); // String - assert_eq!(v.convert(VariantTypeId::String), Variant::from("svr=5;ns=22;s=Hello World")); + assert_eq!( + v.convert(VariantTypeId::String), + Variant::from("svr=5;ns=22;s=Hello World") + ); // Impermissible - ensure_conversion_fails(&v, &[ - VariantTypeId::Boolean, VariantTypeId::Byte, VariantTypeId::ByteString, VariantTypeId::DateTime, - VariantTypeId::Double, VariantTypeId::Float, VariantTypeId::Guid, VariantTypeId::Int16, VariantTypeId::Int32, - VariantTypeId::Int64, VariantTypeId::NodeId, VariantTypeId::SByte, VariantTypeId::LocalizedText, - VariantTypeId::QualifiedName, VariantTypeId::UInt16, VariantTypeId::UInt32, - VariantTypeId::UInt64, VariantTypeId::XmlElement, - ]); + ensure_conversion_fails( + &v, + &[ + VariantTypeId::Boolean, + VariantTypeId::Byte, + VariantTypeId::ByteString, + VariantTypeId::DateTime, + VariantTypeId::Double, + VariantTypeId::Float, + VariantTypeId::Guid, + VariantTypeId::Int16, + VariantTypeId::Int32, + VariantTypeId::Int64, + VariantTypeId::NodeId, + VariantTypeId::SByte, + VariantTypeId::LocalizedText, + VariantTypeId::QualifiedName, + VariantTypeId::UInt16, + VariantTypeId::UInt32, + VariantTypeId::UInt64, + VariantTypeId::XmlElement, + ], + ); } #[test] @@ -893,7 +1430,10 @@ fn variant_cast_expanded_node_id() { server_index: 5, }); // NodeId - assert_eq!(v.cast(VariantTypeId::NodeId), Variant::from(NodeId::new(22, "Hello World"))); + assert_eq!( + v.cast(VariantTypeId::NodeId), + Variant::from(NodeId::new(22, "Hello World")) + ); } #[test] @@ -905,10 +1445,8 @@ fn variant_bytestring_to_bytearray() { assert_eq!(v.array_data_type().unwrap(), DataTypeId::Byte.into()); let array = match v { - Variant::Array(v) => { - v - } - _ => panic!() + Variant::Array(v) => v, + _ => panic!(), }; let v = array.values; diff --git a/types/src/variant.rs b/types/src/variant.rs index 9715184a4..50f280a00 100644 --- a/types/src/variant.rs +++ b/types/src/variant.rs @@ -4,11 +4,11 @@ //! Contains the implementation of `Variant`. 
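The tests above exercise two distinct conversion paths, so a minimal sketch may help while scanning the reformatted assertions. It is not part of this patch and assumes the `opcua_types` re-exports used elsewhere in the change (`Variant`, `VariantTypeId`, `StatusCode`): `convert` applies only the implicit, lossless conversions, while `cast` also attempts the explicit, possibly lossy ones, and both signal failure with `Variant::Empty` rather than an error (the `ensure_conversion_fails` helper, defined earlier in the test file, presumably just asserts `convert` yields `Variant::Empty` for each listed type id).

use opcua_types::status_code::StatusCode;
use opcua_types::variant::{Variant, VariantTypeId};

fn conversion_sketch() {
    let v = Variant::from(43u64);
    // convert() refuses the narrowing UInt64 -> Byte conversion outright...
    assert_eq!(v.convert(VariantTypeId::Byte), Variant::Empty);
    // ...while cast() allows it when the value fits, and yields Empty when it
    // does not, as the uint64 cast test above checks with 256u64.
    assert_eq!(v.cast(VariantTypeId::Byte), Variant::from(43u8));
    assert_eq!(Variant::from(256u64).cast(VariantTypeId::Byte), Variant::Empty);
    // Casting a StatusCode keeps the severity/sub-code half, i.e. the top
    // sixteen bits: BadResourceUnavailable (0x8004_0000) becomes 0x8004.
    let sc = Variant::from(StatusCode::BadResourceUnavailable);
    assert_eq!(sc.cast(VariantTypeId::UInt16), Variant::UInt16(0x8004));
}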
-use std::{i16, i32, i64, i8, u16, u32, u64, u8}; use std::convert::TryFrom; use std::fmt; use std::io::{Read, Write}; use std::str::FromStr; +use std::{i16, i32, i64, i8, u16, u32, u64, u8}; use crate::{ array::*, @@ -133,17 +133,33 @@ impl TryFrom<&NodeId> for VariantTypeId { type_id if type_id == DataTypeId::Float as u32 => Ok(VariantTypeId::Float), type_id if type_id == DataTypeId::Double as u32 => Ok(VariantTypeId::Double), type_id if type_id == DataTypeId::String as u32 => Ok(VariantTypeId::String), - type_id if type_id == DataTypeId::DateTime as u32 => Ok(VariantTypeId::DateTime), + type_id if type_id == DataTypeId::DateTime as u32 => { + Ok(VariantTypeId::DateTime) + } type_id if type_id == DataTypeId::Guid as u32 => Ok(VariantTypeId::Guid), - type_id if type_id == DataTypeId::ByteString as u32 => Ok(VariantTypeId::ByteString), - type_id if type_id == DataTypeId::XmlElement as u32 => Ok(VariantTypeId::XmlElement), + type_id if type_id == DataTypeId::ByteString as u32 => { + Ok(VariantTypeId::ByteString) + } + type_id if type_id == DataTypeId::XmlElement as u32 => { + Ok(VariantTypeId::XmlElement) + } type_id if type_id == DataTypeId::NodeId as u32 => Ok(VariantTypeId::NodeId), - type_id if type_id == DataTypeId::ExpandedNodeId as u32 => Ok(VariantTypeId::ExpandedNodeId), - type_id if type_id == DataTypeId::XmlElement as u32 => Ok(VariantTypeId::XmlElement), - type_id if type_id == DataTypeId::StatusCode as u32 => Ok(VariantTypeId::StatusCode), - type_id if type_id == DataTypeId::QualifiedName as u32 => Ok(VariantTypeId::QualifiedName), - type_id if type_id == DataTypeId::LocalizedText as u32 => Ok(VariantTypeId::LocalizedText), - _ => Err(()) + type_id if type_id == DataTypeId::ExpandedNodeId as u32 => { + Ok(VariantTypeId::ExpandedNodeId) + } + type_id if type_id == DataTypeId::XmlElement as u32 => { + Ok(VariantTypeId::XmlElement) + } + type_id if type_id == DataTypeId::StatusCode as u32 => { + Ok(VariantTypeId::StatusCode) + } + type_id if type_id == DataTypeId::QualifiedName as u32 => { + Ok(VariantTypeId::QualifiedName) + } + type_id if type_id == DataTypeId::LocalizedText as u32 => { + Ok(VariantTypeId::LocalizedText) + } + _ => Err(()), } } else { Err(()) @@ -158,12 +174,17 @@ impl VariantTypeId { /// Tests and returns true if the variant holds a numeric type pub fn is_numeric(&self) -> bool { match self { - VariantTypeId::SByte | VariantTypeId::Byte | - VariantTypeId::Int16 | VariantTypeId::UInt16 | - VariantTypeId::Int32 | VariantTypeId::UInt32 | - VariantTypeId::Int64 | VariantTypeId::UInt64 | - VariantTypeId::Float | VariantTypeId::Double => true, - _ => false + VariantTypeId::SByte + | VariantTypeId::Byte + | VariantTypeId::Int16 + | VariantTypeId::UInt16 + | VariantTypeId::Int32 + | VariantTypeId::UInt32 + | VariantTypeId::Int64 + | VariantTypeId::UInt64 + | VariantTypeId::Float + | VariantTypeId::Double => true, + _ => false, } } @@ -373,7 +394,7 @@ macro_rules! cast_to_bool { } else { Variant::Empty } - } + }; } macro_rules! cast_to_integer { @@ -421,7 +442,7 @@ macro_rules! from_array_to_variant_impl { Variant::from(array) } } - } + }; } from_array_to_variant_impl!(String); @@ -439,7 +460,6 @@ from_array_to_variant_impl!(f64); /// If the Variant holds macro_rules! try_from_variant_to_array_impl { ($rtype: ident, $vtype: ident) => { - impl TryFrom<&Variant> for Vec<$rtype> { type Error = (); @@ -450,20 +470,23 @@ macro_rules! 
try_from_variant_to_array_impl { if !values_are_of_type(values, VariantTypeId::$vtype) { Err(()) } else { - Ok(values.iter().map(|v| { - if let Variant::$vtype(v) = v { - *v - } else { - panic!() - } - }).collect()) + Ok(values + .iter() + .map(|v| { + if let Variant::$vtype(v) = v { + *v + } else { + panic!() + } + }) + .collect()) } } - _ => Err(()) + _ => Err(()), } } } - } + }; } // These are implementations of TryFrom which will attempt to transform a single dimension array @@ -516,7 +539,11 @@ impl BinaryEncoder for Variant { // Array length let mut size = 4; // Size of each value - size += array.values.iter().map(|v| Variant::byte_len_variant_value(v)).sum::(); + size += array + .values + .iter() + .map(|v| Variant::byte_len_variant_value(v)) + .sum::(); if array.has_dimensions() { // Dimensions (size + num elements) size += 4 + array.dimensions.len() * 4; @@ -606,7 +633,11 @@ impl BinaryEncoder for Variant { let mut values: Vec = Vec::with_capacity(array_length as usize); for _ in 0..array_length { - values.push(Variant::decode_variant_value(stream, element_encoding_mask, decoding_limits)?); + values.push(Variant::decode_variant_value( + stream, + element_encoding_mask, + decoding_limits, + )?); } if encoding_mask & ARRAY_DIMENSIONS_BIT != 0 { if let Some(dimensions) = read_array(stream, decoding_limits)? { @@ -614,9 +645,13 @@ impl BinaryEncoder for Variant { error!("Invalid array dimensions"); Err(StatusCode::BadDecodingError) } else { - let array_dimensions_length = dimensions.iter().fold(1u32, |sum, d| sum * *d); + let array_dimensions_length = + dimensions.iter().fold(1u32, |sum, d| sum * *d); if array_dimensions_length != array_length as u32 { - error!("Array dimensions does not match array length {}", array_length); + error!( + "Array dimensions does not match array length {}", + array_length + ); Err(StatusCode::BadDecodingError) } else { Ok(Variant::from((values, dimensions))) @@ -664,7 +699,7 @@ impl fmt::Display for Variant { Variant::String(ref v) => write!(f, "{}", v.to_string()), Variant::Guid(ref v) => write!(f, "{}", v.to_string()), Variant::DateTime(ref v) => write!(f, "{}", v.to_string()), - value => write!(f, "{:?}", value) + value => write!(f, "{:?}", value), } } } @@ -742,7 +777,11 @@ impl Variant { } /// Reads just the variant value from the stream - fn decode_variant_value(stream: &mut S, encoding_mask: u8, decoding_limits: &DecodingLimits) -> EncodingResult { + fn decode_variant_value( + stream: &mut S, + encoding_mask: u8, + decoding_limits: &DecodingLimits, + ) -> EncodingResult { let result = if encoding_mask == 0 { Variant::Empty } else if Self::test_encoding_flag(encoding_mask, DataTypeId::Boolean) { @@ -802,19 +841,17 @@ impl Variant { let result = self.convert(target_type); if result == Variant::Empty { match *self { - Variant::Boolean(v) => { - match target_type { - VariantTypeId::String => UAString::from(if v { "true" } else { "false" }).into(), - _ => Variant::Empty - } - } - Variant::Byte(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::String => format!("{}", v).into(), - _ => Variant::Empty + Variant::Boolean(v) => match target_type { + VariantTypeId::String => { + UAString::from(if v { "true" } else { "false" }).into() } - } + _ => Variant::Empty, + }, + Variant::Byte(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::String => format!("{}", v).into(), + _ => Variant::Empty, + }, Variant::Double(v) => { // Truncated value used in integer conversions let vt = 
f64::trunc(v + 0.5); @@ -830,27 +867,23 @@ impl Variant { VariantTypeId::UInt16 => cast_to_integer!(vt, f64, u16), VariantTypeId::UInt32 => cast_to_integer!(vt, f64, u32), VariantTypeId::UInt64 => cast_to_integer!(vt, f64, u64), - _ => Variant::Empty - } - } - Variant::ByteString(ref v) => { - match target_type { - VariantTypeId::Guid => Guid::try_from(v).map(|v| v.into()).unwrap_or(Variant::Empty), - _ => Variant::Empty - } - } - Variant::DateTime(ref v) => { - match target_type { - VariantTypeId::String => format!("{}", *v).into(), - _ => Variant::Empty - } - } - Variant::ExpandedNodeId(ref v) => { - match target_type { - VariantTypeId::NodeId => v.node_id.clone().into(), - _ => Variant::Empty + _ => Variant::Empty, } } + Variant::ByteString(ref v) => match target_type { + VariantTypeId::Guid => Guid::try_from(v) + .map(|v| v.into()) + .unwrap_or(Variant::Empty), + _ => Variant::Empty, + }, + Variant::DateTime(ref v) => match target_type { + VariantTypeId::String => format!("{}", *v).into(), + _ => Variant::Empty, + }, + Variant::ExpandedNodeId(ref v) => match target_type { + VariantTypeId::NodeId => v.node_id.clone().into(), + _ => Variant::Empty, + }, Variant::Float(v) => { let vt = f32::trunc(v + 0.5); match target_type { @@ -864,135 +897,133 @@ impl Variant { VariantTypeId::UInt16 => cast_to_integer!(vt, f32, u16), VariantTypeId::UInt32 => cast_to_integer!(vt, f32, u32), VariantTypeId::UInt64 => cast_to_integer!(vt, f32, u64), - _ => Variant::Empty - } - } - Variant::Guid(ref v) => { - match target_type { - VariantTypeId::String => format!("{}", *v).into(), - VariantTypeId::ByteString => ByteString::from(v.as_ref().clone()).into(), - _ => Variant::Empty - } - } - Variant::Int16(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, i16, u8), - VariantTypeId::SByte => cast_to_integer!(v, i16, i8), - VariantTypeId::String => format!("{}", v).into(), - VariantTypeId::UInt16 => cast_to_integer!(v, i16, u16), - _ => Variant::Empty - } - } - Variant::Int32(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, i32, u8), - VariantTypeId::Int16 => cast_to_integer!(v, i32, i16), - VariantTypeId::SByte => cast_to_integer!(v, i32, i8), - VariantTypeId::StatusCode => (StatusCode::from_bits_truncate(v as u32)).into(), - VariantTypeId::String => format!("{}", v).into(), - VariantTypeId::UInt16 => cast_to_integer!(v, i32, u16), - VariantTypeId::UInt32 => cast_to_integer!(v, i32, u32), - _ => Variant::Empty + _ => Variant::Empty, } } - Variant::Int64(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, i64, u8), - VariantTypeId::Int16 => cast_to_integer!(v, i64, i16), - VariantTypeId::Int32 => cast_to_integer!(v, i64, i32), - VariantTypeId::SByte => cast_to_integer!(v, i64, i8), - VariantTypeId::StatusCode => StatusCode::from_bits_truncate(v as u32).into(), - VariantTypeId::String => format!("{}", v).into(), - VariantTypeId::UInt16 => cast_to_integer!(v, i64, u16), - VariantTypeId::UInt32 => cast_to_integer!(v, i64, u32), - VariantTypeId::UInt64 => cast_to_integer!(v, i64, u64), - _ => Variant::Empty - } - } - Variant::SByte(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, i8, u8), - VariantTypeId::String => format!("{}", v).into(), - _ => Variant::Empty - } - } - Variant::StatusCode(v) => { - match target_type { - 
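The `Double` and `Float` cast arms round with `trunc(v + 0.5)` and then defer to `cast_to_integer!`; the macro body is outside this excerpt, but judging from the overflow assertions in the tests it performs a range check before converting. A standalone, hypothetical restatement of that check:

// Not the library macro: a hand-written equivalent of what cast_to_integer!
// appears to do for an f64 -> u8 cast, based on the behaviour the tests assert.
fn cast_f64_to_u8(v: f64) -> Option<u8> {
    let vt = f64::trunc(v + 0.5); // same round-half-up step as the cast arms
    if vt < 0.0 || vt > f64::from(u8::MAX) {
        None // the real macro produces Variant::Empty here
    } else {
        Some(vt as u8)
    }
}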
VariantTypeId::UInt16 => (((v.bits() & 0xffff_0000) >> 16) as u16).into(), - _ => Variant::Empty - } - } - Variant::String(ref v) => { - match target_type { - VariantTypeId::NodeId => if v.is_null() { + Variant::Guid(ref v) => match target_type { + VariantTypeId::String => format!("{}", *v).into(), + VariantTypeId::ByteString => ByteString::from(v.as_ref().clone()).into(), + _ => Variant::Empty, + }, + Variant::Int16(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, i16, u8), + VariantTypeId::SByte => cast_to_integer!(v, i16, i8), + VariantTypeId::String => format!("{}", v).into(), + VariantTypeId::UInt16 => cast_to_integer!(v, i16, u16), + _ => Variant::Empty, + }, + Variant::Int32(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, i32, u8), + VariantTypeId::Int16 => cast_to_integer!(v, i32, i16), + VariantTypeId::SByte => cast_to_integer!(v, i32, i8), + VariantTypeId::StatusCode => (StatusCode::from_bits_truncate(v as u32)).into(), + VariantTypeId::String => format!("{}", v).into(), + VariantTypeId::UInt16 => cast_to_integer!(v, i32, u16), + VariantTypeId::UInt32 => cast_to_integer!(v, i32, u32), + _ => Variant::Empty, + }, + Variant::Int64(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, i64, u8), + VariantTypeId::Int16 => cast_to_integer!(v, i64, i16), + VariantTypeId::Int32 => cast_to_integer!(v, i64, i32), + VariantTypeId::SByte => cast_to_integer!(v, i64, i8), + VariantTypeId::StatusCode => StatusCode::from_bits_truncate(v as u32).into(), + VariantTypeId::String => format!("{}", v).into(), + VariantTypeId::UInt16 => cast_to_integer!(v, i64, u16), + VariantTypeId::UInt32 => cast_to_integer!(v, i64, u32), + VariantTypeId::UInt64 => cast_to_integer!(v, i64, u64), + _ => Variant::Empty, + }, + Variant::SByte(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, i8, u8), + VariantTypeId::String => format!("{}", v).into(), + _ => Variant::Empty, + }, + Variant::StatusCode(v) => match target_type { + VariantTypeId::UInt16 => (((v.bits() & 0xffff_0000) >> 16) as u16).into(), + _ => Variant::Empty, + }, + Variant::String(ref v) => match target_type { + VariantTypeId::NodeId => { + if v.is_null() { Variant::Empty } else { - NodeId::from_str(v.as_ref()).map(|v| v.into()).unwrap_or(Variant::Empty) + NodeId::from_str(v.as_ref()) + .map(|v| v.into()) + .unwrap_or(Variant::Empty) } - VariantTypeId::ExpandedNodeId => if v.is_null() { + } + VariantTypeId::ExpandedNodeId => { + if v.is_null() { Variant::Empty } else { - ExpandedNodeId::from_str(v.as_ref()).map(|v| v.into()).unwrap_or(Variant::Empty) + ExpandedNodeId::from_str(v.as_ref()) + .map(|v| v.into()) + .unwrap_or(Variant::Empty) } - VariantTypeId::DateTime => if v.is_null() { + } + VariantTypeId::DateTime => { + if v.is_null() { Variant::Empty } else { - DateTime::from_str(v.as_ref()).map(|v| v.into()).unwrap_or(Variant::Empty) + DateTime::from_str(v.as_ref()) + .map(|v| v.into()) + .unwrap_or(Variant::Empty) } - VariantTypeId::LocalizedText => if v.is_null() { + } + VariantTypeId::LocalizedText => { + if v.is_null() { LocalizedText::null().into() } else { LocalizedText::new("", v.as_ref()).into() } - VariantTypeId::QualifiedName => if v.is_null() { + } + VariantTypeId::QualifiedName => { + if v.is_null() { QualifiedName::null().into() } else { QualifiedName::new(0, 
v.as_ref()).into() } - _ => Variant::Empty } - } - Variant::UInt16(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, u16, u8), - VariantTypeId::SByte => cast_to_integer!(v, u16, i8), - VariantTypeId::String => format!("{}", v).into(), - _ => Variant::Empty - } - } - Variant::UInt32(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, u32, u8), - VariantTypeId::Int16 => cast_to_integer!(v, u32, i16), - VariantTypeId::SByte => cast_to_integer!(v, u32, i8), - VariantTypeId::StatusCode => StatusCode::from_bits_truncate(v).into(), - VariantTypeId::String => format!("{}", v).into(), - VariantTypeId::UInt16 => cast_to_integer!(v, u32, u16), - _ => Variant::Empty - } - } - Variant::UInt64(v) => { - match target_type { - VariantTypeId::Boolean => cast_to_bool!(v), - VariantTypeId::Byte => cast_to_integer!(v, u64, u8), - VariantTypeId::Int16 => cast_to_integer!(v, u64, i16), - VariantTypeId::SByte => cast_to_integer!(v, u64, i8), - VariantTypeId::StatusCode => StatusCode::from_bits_truncate((v & 0x0000_0000_ffff_ffff) as u32).into(), - VariantTypeId::String => format!("{}", v).into(), - VariantTypeId::UInt16 => cast_to_integer!(v, u64, u16), - VariantTypeId::UInt32 => cast_to_integer!(v, u64, u32), - _ => Variant::Empty + _ => Variant::Empty, + }, + Variant::UInt16(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, u16, u8), + VariantTypeId::SByte => cast_to_integer!(v, u16, i8), + VariantTypeId::String => format!("{}", v).into(), + _ => Variant::Empty, + }, + Variant::UInt32(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, u32, u8), + VariantTypeId::Int16 => cast_to_integer!(v, u32, i16), + VariantTypeId::SByte => cast_to_integer!(v, u32, i8), + VariantTypeId::StatusCode => StatusCode::from_bits_truncate(v).into(), + VariantTypeId::String => format!("{}", v).into(), + VariantTypeId::UInt16 => cast_to_integer!(v, u32, u16), + _ => Variant::Empty, + }, + Variant::UInt64(v) => match target_type { + VariantTypeId::Boolean => cast_to_bool!(v), + VariantTypeId::Byte => cast_to_integer!(v, u64, u8), + VariantTypeId::Int16 => cast_to_integer!(v, u64, i16), + VariantTypeId::SByte => cast_to_integer!(v, u64, i8), + VariantTypeId::StatusCode => { + StatusCode::from_bits_truncate((v & 0x0000_0000_ffff_ffff) as u32).into() } - } + VariantTypeId::String => format!("{}", v).into(), + VariantTypeId::UInt16 => cast_to_integer!(v, u64, u16), + VariantTypeId::UInt32 => cast_to_integer!(v, u64, u32), + _ => Variant::Empty, + }, // NodeId, LocalizedText, QualifiedName, XmlElement have no explicit cast - _ => Variant::Empty + _ => Variant::Empty, } } else { result @@ -1020,126 +1051,124 @@ impl Variant { VariantTypeId::UInt16 => (v as u16).into(), VariantTypeId::UInt32 => (v as u32).into(), VariantTypeId::UInt64 => (v as u64).into(), - _ => Variant::Empty - } - } - Variant::Byte(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - VariantTypeId::Int16 => (v as i16).into(), - VariantTypeId::Int32 => (v as i32).into(), - VariantTypeId::Int64 => (v as i64).into(), - VariantTypeId::SByte => (v as i8).into(), - VariantTypeId::UInt16 => (v as u16).into(), - VariantTypeId::UInt32 => (v as u32).into(), - VariantTypeId::UInt64 => (v as u64).into(), - _ => Variant::Empty + _ => Variant::Empty, 
} } + Variant::Byte(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + VariantTypeId::Int16 => (v as i16).into(), + VariantTypeId::Int32 => (v as i32).into(), + VariantTypeId::Int64 => (v as i64).into(), + VariantTypeId::SByte => (v as i8).into(), + VariantTypeId::UInt16 => (v as u16).into(), + VariantTypeId::UInt32 => (v as u32).into(), + VariantTypeId::UInt64 => (v as u64).into(), + _ => Variant::Empty, + }, // ByteString - everything is X or E except to itself // DateTime - everything is X or E except to itself // Double - everything is X or E except to itself - Variant::ExpandedNodeId(ref v) => { // Everything is X or E except to String match target_type { VariantTypeId::String => format!("{}", v).into(), - _ => Variant::Empty + _ => Variant::Empty, } } Variant::Float(v) => { // Everything is X or E except to Double match target_type { VariantTypeId::Double => (v as f64).into(), - _ => Variant::Empty + _ => Variant::Empty, } } // Guid - everything is X or E except to itself - - Variant::Int16(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - VariantTypeId::Int32 => (v as i32).into(), - VariantTypeId::Int64 => (v as i64).into(), - VariantTypeId::UInt32 => if v < 0 { + Variant::Int16(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + VariantTypeId::Int32 => (v as i32).into(), + VariantTypeId::Int64 => (v as i64).into(), + VariantTypeId::UInt32 => { + if v < 0 { Variant::Empty } else { (v as u32).into() - }, - VariantTypeId::UInt64 => if v < 0 { + } + } + VariantTypeId::UInt64 => { + if v < 0 { Variant::Empty } else { (v as u64).into() - }, - _ => Variant::Empty + } } - } - Variant::Int32(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - VariantTypeId::Int64 => (v as i64).into(), - VariantTypeId::UInt64 => if v < 0 { + _ => Variant::Empty, + }, + Variant::Int32(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + VariantTypeId::Int64 => (v as i64).into(), + VariantTypeId::UInt64 => { + if v < 0 { Variant::Empty } else { (v as u64).into() - }, - _ => Variant::Empty - } - } - Variant::Int64(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - _ => Variant::Empty + } } - } + _ => Variant::Empty, + }, + Variant::Int64(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + _ => Variant::Empty, + }, Variant::NodeId(ref v) => { // Guid - everything is X or E except to ExpandedNodeId and String match target_type { VariantTypeId::ExpandedNodeId => ExpandedNodeId::from(*v.clone()).into(), VariantTypeId::String => format!("{}", v).into(), - _ => Variant::Empty + _ => Variant::Empty, } } - Variant::SByte(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - VariantTypeId::Int16 => (v as i16).into(), - VariantTypeId::Int32 => (v as i32).into(), - VariantTypeId::Int64 => (v as i64).into(), - VariantTypeId::UInt16 => if v < 0 { + Variant::SByte(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + VariantTypeId::Int16 => (v as i16).into(), + VariantTypeId::Int32 => (v as i32).into(), + 
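The integer arms of `convert` encode the implicit widening table: unsigned sources widen freely, while signed sources only reach an unsigned target when the value is non-negative. A short hedged illustration, again assuming the crate's public re-exports:

use opcua_types::variant::{Variant, VariantTypeId};

fn widening_sketch() {
    // Byte widens implicitly to any larger numeric type.
    assert_eq!(
        Variant::from(200u8).convert(VariantTypeId::Int32),
        Variant::Int32(200)
    );
    // A negative Int16 refuses to widen into an unsigned target...
    assert_eq!(
        Variant::from(-5i16).convert(VariantTypeId::UInt32),
        Variant::Empty
    );
    // ...but the same conversion succeeds once the value is non-negative.
    assert_eq!(
        Variant::from(5i16).convert(VariantTypeId::UInt32),
        Variant::UInt32(5)
    );
}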
VariantTypeId::Int64 => (v as i64).into(), + VariantTypeId::UInt16 => { + if v < 0 { Variant::Empty } else { (v as u16).into() - }, - VariantTypeId::UInt32 => if v < 0 { + } + } + VariantTypeId::UInt32 => { + if v < 0 { Variant::Empty } else { (v as u32).into() - }, - VariantTypeId::UInt64 => if v < 0 { + } + } + VariantTypeId::UInt64 => { + if v < 0 { Variant::Empty } else { (v as u64).into() - }, - _ => Variant::Empty - } - } - Variant::StatusCode(v) => { - match target_type { - VariantTypeId::Int32 => (v.bits() as i32).into(), - VariantTypeId::Int64 => (v.bits() as i64).into(), - VariantTypeId::UInt32 => (v.bits() as u32).into(), - VariantTypeId::UInt64 => (v.bits() as u64).into(), - _ => Variant::Empty + } } - } + _ => Variant::Empty, + }, + Variant::StatusCode(v) => match target_type { + VariantTypeId::Int32 => (v.bits() as i32).into(), + VariantTypeId::Int64 => (v.bits() as i64).into(), + VariantTypeId::UInt32 => (v.bits() as u32).into(), + VariantTypeId::UInt64 => (v.bits() as u64).into(), + _ => Variant::Empty, + }, Variant::String(ref v) => { if v.is_empty() { Variant::Empty @@ -1167,9 +1196,9 @@ impl Variant { VariantTypeId::Float => { f32::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) } - VariantTypeId::Guid => { - Guid::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) - } + VariantTypeId::Guid => Guid::from_str(v) + .map(|v| v.into()) + .unwrap_or(Variant::Empty), VariantTypeId::Int16 => { i16::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) } @@ -1179,9 +1208,9 @@ impl Variant { VariantTypeId::Int64 => { i64::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) } - VariantTypeId::NodeId => { - NodeId::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) - } + VariantTypeId::NodeId => NodeId::from_str(v) + .map(|v| v.into()) + .unwrap_or(Variant::Empty), VariantTypeId::SByte => { i8::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) } @@ -1194,16 +1223,14 @@ impl Variant { VariantTypeId::UInt64 => { u64::from_str(v).map(|v| v.into()).unwrap_or(Variant::Empty) } - _ => Variant::Empty + _ => Variant::Empty, } } } - Variant::LocalizedText(ref v) => { - match target_type { - VariantTypeId::String => v.text.clone().into(), - _ => Variant::Empty - } - } + Variant::LocalizedText(ref v) => match target_type { + VariantTypeId::String => v.text.clone().into(), + _ => Variant::Empty, + }, Variant::QualifiedName(ref v) => { match target_type { VariantTypeId::String => { @@ -1222,7 +1249,7 @@ impl Variant { LocalizedText::new("", v.name.as_ref()).into() } } - _ => Variant::Empty + _ => Variant::Empty, } } Variant::UInt16(v) => { @@ -1238,33 +1265,29 @@ impl Variant { } VariantTypeId::UInt32 => (v as u32).into(), VariantTypeId::UInt64 => (v as u64).into(), - _ => Variant::Empty - } - } - Variant::UInt32(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - VariantTypeId::Int32 => (v as i32).into(), - VariantTypeId::Int64 => (v as i64).into(), - VariantTypeId::UInt64 => (v as u64).into(), - _ => Variant::Empty - } - } - Variant::UInt64(v) => { - match target_type { - VariantTypeId::Double => (v as f64).into(), - VariantTypeId::Float => (v as f32).into(), - VariantTypeId::Int64 => (v as i64).into(), - _ => Variant::Empty + _ => Variant::Empty, } } + Variant::UInt32(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + VariantTypeId::Int32 => (v as i32).into(), + VariantTypeId::Int64 => (v as i64).into(), + 
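The `String` arm of `convert` delegates to `FromStr` for every numeric and identifier target and folds parse failures into `Variant::Empty` through the `unwrap_or` calls above; a small sketch of the observable behaviour:

use opcua_types::variant::{Variant, VariantTypeId};

fn string_convert_sketch() {
    // A string that parses as the target type converts to that variant...
    assert_eq!(
        Variant::from("123").convert(VariantTypeId::Int64),
        Variant::Int64(123)
    );
    // ...and one that does not parse simply collapses to Variant::Empty.
    assert_eq!(
        Variant::from("not a number").convert(VariantTypeId::Int64),
        Variant::Empty
    );
}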
VariantTypeId::UInt64 => (v as u64).into(), + _ => Variant::Empty, + }, + Variant::UInt64(v) => match target_type { + VariantTypeId::Double => (v as f64).into(), + VariantTypeId::Float => (v as f32).into(), + VariantTypeId::Int64 => (v as i64).into(), + _ => Variant::Empty, + }, Variant::Array(_) => { // TODO Arrays including converting array of length 1 to scalar of same type Variant::Empty } // XmlElement everything is X - _ => Variant::Empty + _ => Variant::Empty, } } @@ -1300,12 +1323,17 @@ impl Variant { /// Tests and returns true if the variant holds a numeric type pub fn is_numeric(&self) -> bool { match self { - Variant::SByte(_) | Variant::Byte(_) | - Variant::Int16(_) | Variant::UInt16(_) | - Variant::Int32(_) | Variant::UInt32(_) | - Variant::Int64(_) | Variant::UInt64(_) | - Variant::Float(_) | Variant::Double(_) => true, - _ => false + Variant::SByte(_) + | Variant::Byte(_) + | Variant::Int16(_) + | Variant::UInt16(_) + | Variant::Int32(_) + | Variant::UInt32(_) + | Variant::Int64(_) + | Variant::UInt64(_) + | Variant::Float(_) + | Variant::Double(_) => true, + _ => false, } } @@ -1313,19 +1341,15 @@ impl Variant { pub fn is_array(&self) -> bool { match self { Variant::Array(_) => true, - _ => false + _ => false, } } pub fn is_array_of_type(&self, variant_type: VariantTypeId) -> bool { // A non-numeric value in the array means it is not numeric match self { - Variant::Array(array) => { - values_are_of_type(array.values.as_slice(), variant_type) - } - _ => { - false - } + Variant::Array(array) => values_are_of_type(array.values.as_slice(), variant_type), + _ => false, } } @@ -1335,7 +1359,7 @@ impl Variant { pub fn is_valid(&self) -> bool { match self { Variant::Array(array) => array.is_valid(), - _ => true + _ => true, } } @@ -1358,9 +1382,7 @@ impl Variant { } Variant::Float(value) => Some(value as f64), Variant::Double(value) => Some(value), - _ => { - None - } + _ => None, } } @@ -1376,7 +1398,7 @@ impl Variant { array.values[0].scalar_data_type() } } - _ => None + _ => None, } } @@ -1404,7 +1426,7 @@ impl Variant { Variant::StatusCode(_) => Some(DataTypeId::StatusCode.into()), Variant::QualifiedName(_) => Some(DataTypeId::QualifiedName.into()), Variant::LocalizedText(_) => Some(DataTypeId::LocalizedText.into()), - _ => None + _ => None, } } @@ -1453,33 +1475,29 @@ impl Variant { /// single array of bytes pub fn to_byte_array(&self) -> Self { match self { - Variant::ByteString(values) => { - match &values.value { - None => Variant::from(Array::new_single(vec![])), - Some(values) => { - let values: Vec = values.iter().map(|v| Variant::Byte(*v)).collect(); - Variant::from(Array::new_single(values)) - } + Variant::ByteString(values) => match &values.value { + None => Variant::from(Array::new_single(vec![])), + Some(values) => { + let values: Vec = values.iter().map(|v| Variant::Byte(*v)).collect(); + Variant::from(Array::new_single(values)) } - } - _ => panic!() + }, + _ => panic!(), } } /// This function returns a substring of a ByteString or a UAString fn substring(&self, min: usize, max: usize) -> Result { match self { - Variant::ByteString(v) => { - v.substring(min, max) - .map(|v| v.into()) - .map_err(|_| StatusCode::BadIndexRangeNoData) - } - Variant::String(v) => { - v.substring(min, max) - .map(|v| v.into()) - .map_err(|_| StatusCode::BadIndexRangeNoData) - } - _ => panic!("Should not be calling substring on other types") + Variant::ByteString(v) => v + .substring(min, max) + .map(|v| v.into()) + .map_err(|_| StatusCode::BadIndexRangeNoData), + Variant::String(v) => v 
+ .substring(min, max) + .map(|v| v.into()) + .map_err(|_| StatusCode::BadIndexRangeNoData), + _ => panic!("Should not be calling substring on other types"), } } @@ -1522,9 +1540,7 @@ impl Variant { Variant::Array(ref mut array) => { let values = &mut array.values; match range { - NumericRange::None => { - Err(StatusCode::BadIndexRangeNoData) - } + NumericRange::None => Err(StatusCode::BadIndexRangeNoData), NumericRange::Index(idx) => { let idx = idx as usize; if idx >= values.len() { @@ -1544,7 +1560,8 @@ impl Variant { // Possibly this could splice or something but it's trying to copy elements // until either the source or destination array is finished. let mut idx = min; - while idx < values.len() && idx <= max && idx - min < other_values.len() { + while idx < values.len() && idx <= max && idx - min < other_values.len() + { values[idx] = other_values[idx - min].clone(); idx += 1; } @@ -1573,9 +1590,7 @@ impl Variant { NumericRange::Index(idx) => { let idx = idx as usize; match self { - Variant::String(_) | Variant::ByteString(_) => { - self.substring(idx, idx) - } + Variant::String(_) | Variant::ByteString(_) => self.substring(idx, idx), Variant::Array(array) => { // Get value at the index (or not) let values = &array.values; @@ -1586,28 +1601,30 @@ impl Variant { Err(StatusCode::BadIndexRangeNoData) } } - _ => Err(StatusCode::BadIndexRangeNoData) + _ => Err(StatusCode::BadIndexRangeNoData), } } NumericRange::Range(min, max) => { let (min, max) = (min as usize, max as usize); match self { - Variant::String(_) | Variant::ByteString(_) => { - self.substring(min, max) - } + Variant::String(_) | Variant::ByteString(_) => self.substring(min, max), Variant::Array(array) => { let values = &array.values; if min >= values.len() { // Min must be in range Err(StatusCode::BadIndexRangeNoData) } else { - let max = if max >= values.len() { values.len() - 1 } else { max }; + let max = if max >= values.len() { + values.len() - 1 + } else { + max + }; let vals = &values[min as usize..=max]; let vals: Vec = vals.iter().map(|v| v.clone()).collect(); Ok(Variant::from(vals)) } } - _ => Err(StatusCode::BadIndexRangeNoData) + _ => Err(StatusCode::BadIndexRangeNoData), } } NumericRange::MultipleRanges(_ranges) => {
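The index-range hunks above clamp `NumericRange::Range(min, max)` to the value's length, treat both bounds as inclusive, and return `BadIndexRangeNoData` when `min` falls outside the value. The snippet below is not the library code; it restates that rule for a plain ASCII `&str` so the clamping is easy to see:

fn range_of_str(s: &str, min: usize, max: usize) -> Result<&str, ()> {
    if min >= s.len() {
        // min must land inside the value, otherwise BadIndexRangeNoData
        Err(())
    } else {
        // the upper bound is clamped to the last index and is inclusive
        let max = max.min(s.len() - 1);
        Ok(&s[min..=max])
    }
}

fn range_sketch() {
    assert_eq!(range_of_str("hello", 1, 100), Ok("ello"));
    assert!(range_of_str("hello", 9, 10).is_err());
}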