@@ -25,7 +25,6 @@ use crate::physical_plan::{displayable, ColumnStatistics, ExecutionPlan, Statist
 use arrow::compute::concat;
 use arrow::datatypes::{Schema, SchemaRef};
 use arrow::error::ArrowError;
-use arrow::error::Result as ArrowResult;
 use arrow::ipc::writer::{FileWriter, IpcWriteOptions};
 use arrow::record_batch::RecordBatch;
 use datafusion_physical_expr::utils::ordering_satisfy;
@@ -68,7 +67,7 @@ impl SizedRecordBatchStream {
 }
 
 impl Stream for SizedRecordBatchStream {
-    type Item = ArrowResult<RecordBatch>;
+    type Item = Result<RecordBatch>;
 
     fn poll_next(
         mut self: std::pin::Pin<&mut Self>,
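
This hunk and the ones below replace arrow's `ArrowResult` with DataFusion's own `Result` alias. A minimal sketch of that alias pattern, using hypothetical `EngineError`/`Result` stand-ins rather than the real `datafusion_common` definitions:

```rust
// Stand-in names, not the real DataFusion types: once `Result<T>` defaults to
// the engine's own error, stream items and function signatures no longer need
// an arrow-specific result alias.
#[derive(Debug)]
pub struct EngineError(pub String); // stand-in for DataFusionError

pub type Result<T, E = EngineError> = std::result::Result<T, E>;

fn yields_batch_like(ok: bool) -> Result<u32> {
    if ok {
        Ok(42)
    } else {
        Err(EngineError("execution failed".to_string()))
    }
}
```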
@@ -92,10 +91,7 @@ impl RecordBatchStream for SizedRecordBatchStream {
 
 /// Create a vector of record batches from a stream
 pub async fn collect(stream: SendableRecordBatchStream) -> Result<Vec<RecordBatch>> {
-    stream
-        .try_collect::<Vec<_>>()
-        .await
-        .map_err(DataFusionError::from)
+    stream.try_collect::<Vec<_>>().await
 }
 
 /// Merge two record batch references into a single record batch.
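
Because the stream now yields the engine's `Result` items directly, `try_collect` already produces the right error type and the trailing `map_err` can be dropped. A small sketch of that behaviour with a stand-in error type (assumes the `futures` and `tokio` crates):

```rust
use futures::stream::{self, TryStreamExt};

#[derive(Debug)]
struct EngineError(String); // stand-in for DataFusionError

#[tokio::main]
async fn main() -> Result<(), EngineError> {
    // The stream's items already carry the target error type, so
    // `try_collect` needs no extra `map_err` step.
    let s = stream::iter(vec![Ok::<_, EngineError>(1), Ok(2), Ok(3)]);
    let collected: Vec<i32> = s.try_collect().await?;
    assert_eq!(collected, vec![1, 2, 3]);
    Ok(())
}
```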
@@ -104,15 +100,16 @@ pub fn merge_batches(
     first: &RecordBatch,
     second: &RecordBatch,
     schema: SchemaRef,
-) -> ArrowResult<RecordBatch> {
+) -> Result<RecordBatch> {
     let columns = (0..schema.fields.len())
         .map(|index| {
             let first_column = first.column(index).as_ref();
             let second_column = second.column(index).as_ref();
             concat(&[first_column, second_column])
         })
-        .collect::<ArrowResult<Vec<_>>>()?;
-    RecordBatch::try_new(schema, columns)
+        .collect::<Result<Vec<_>, ArrowError>>()
+        .map_err(Into::<DataFusionError>::into)?;
+    RecordBatch::try_new(schema, columns).map_err(Into::into)
 }
 
 /// Merge a slice of record batch references into a single record batch, or
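
The `merge_batches` hunk above shows the conversion pattern used throughout this diff: collect fallible results with the concrete arrow error, then lift it into the engine error once via a `From` impl. A compact sketch with hypothetical stand-in types (no arrow dependency):

```rust
#[derive(Debug)]
struct ArrowishError(String); // stand-in for arrow::error::ArrowError

#[derive(Debug)]
enum EngineError {
    Arrowish(ArrowishError), // stand-in for the engine's error enum
}

impl From<ArrowishError> for EngineError {
    fn from(e: ArrowishError) -> Self {
        EngineError::Arrowish(e)
    }
}

fn concat_lengths(parts: &[&str]) -> Result<Vec<usize>, EngineError> {
    (0..parts.len())
        .map(|i| {
            if parts[i].is_empty() {
                Err(ArrowishError(format!("empty part at {i}")))
            } else {
                Ok(parts[i].len())
            }
        })
        // Collect with the concrete source error, then convert it once.
        .collect::<Result<Vec<_>, ArrowishError>>()
        .map_err(Into::<EngineError>::into)
}
```

The `map_err(Into::into)` in the later `IPCWriter::finish` hunk is the same idea, with the target error type inferred from the function's return type.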
@@ -121,7 +118,7 @@ pub fn merge_batches(
121118pub fn merge_multiple_batches (
122119 batches : & [ & RecordBatch ] ,
123120 schema : SchemaRef ,
124- ) -> ArrowResult < Option < RecordBatch > > {
121+ ) -> Result < Option < RecordBatch > > {
125122 Ok ( if batches. is_empty ( ) {
126123 None
127124 } else {
@@ -134,7 +131,8 @@ pub fn merge_multiple_batches(
                         .collect::<Vec<_>>(),
                 )
             })
-            .collect::<ArrowResult<Vec<_>>>()?;
+            .collect::<Result<Vec<_>, ArrowError>>()
+            .map_err(Into::<DataFusionError>::into)?;
         Some(RecordBatch::try_new(schema, columns)?)
     })
 }
@@ -190,7 +188,7 @@ fn build_file_list_recurse(
 /// Spawns a task to the tokio threadpool and writes its outputs to the provided mpsc sender
 pub(crate) fn spawn_execution(
     input: Arc<dyn ExecutionPlan>,
-    output: mpsc::Sender<ArrowResult<RecordBatch>>,
+    output: mpsc::Sender<Result<RecordBatch>>,
     partition: usize,
     context: Arc<TaskContext>,
 ) -> JoinHandle<()> {
@@ -199,8 +197,7 @@ pub(crate) fn spawn_execution(
             Err(e) => {
                 // If send fails, plan being torn down,
                 // there is no place to send the error.
-                let arrow_error = ArrowError::ExternalError(Box::new(e));
-                output.send(Err(arrow_error)).await.ok();
+                output.send(Err(e)).await.ok();
                 debug!(
                     "Stopping execution: error executing input: {}",
                     displayable(input.as_ref()).one_line()
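
With the channel now typed over the engine result, a failed `execute` can be forwarded as-is instead of being boxed into `ArrowError::ExternalError` first. A minimal sketch of that send-and-ignore pattern (assumes tokio's `mpsc`; `EngineError` and `forward_error` are stand-ins, not DataFusion APIs):

```rust
use tokio::sync::mpsc;

#[derive(Debug)]
struct EngineError(String); // stand-in for DataFusionError

async fn forward_error(output: mpsc::Sender<Result<u64, EngineError>>, e: EngineError) {
    // `.ok()` deliberately discards a send failure: if the receiver is gone,
    // the plan is being torn down and there is nowhere left to report it.
    output.send(Err(e)).await.ok();
}
```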
@@ -524,7 +521,7 @@ impl IPCWriter {
 
     /// Finish the writer
     pub fn finish(&mut self) -> Result<()> {
-        self.writer.finish().map_err(DataFusionError::ArrowError)
+        self.writer.finish().map_err(Into::into)
     }
 
     /// Path write to