Cannot import relationships properly from the Neo4j sample Movie database, and can't select columns in PySpark

Spark: 3.0.3
PySpark: 3.0.3
Scala: 2.12
Neo4j connector: neo4j-connector-apache-spark_2.12:4.0.1_for_spark_3

from pyspark.sql import SparkSession

spark = (
    SparkSession.builder.appName('RS1')
    .master("spark://some-url:7077")
    .config('spark.executor.memory', '1000m')
    .config("spark.jars.packages", "graphframes:graphframes:0.8.0-spark3.0-s_2.12,neo4j-contrib:neo4j-connector-apache-spark_2.12:4.0.1_for_spark_3")
    .getOrCreate()
)

dfr = (
    spark.read.format("org.neo4j.spark.DataSource")
    .option("url", "bolt://some-url:7687")
    .option("authentication.basic.username", "some-uname")
    .option("authentication.basic.password", "some-pass")
    .option("pushdown.columns.enabled", "false")
    .option("relationship", "ACTED_IN")
    .option("relationship.source.labels", "Person")
    .option("relationship.target.labels", "Movie")
    .load()
)

I get this error:
21/07/13 23:55:04 WARN SchemaService: Switching to query schema resolution
21/07/13 23:55:04 WARN SchemaService: Switching to query schema resolution
21/07/13 23:55:04 WARN SchemaService: Switching to query schema resolution
21/07/13 23:55:04 WARN SchemaService: For the following exception
org.neo4j.driver.exceptions.ClientException: Unable to convert scala.collection.immutable.Map$Map1 to Neo4j Value.
at org.neo4j.driver.Values.value(Values.java:134)
at org.neo4j.driver.internal.util.Extract.mapOfValues(Extract.java:203)
at org.neo4j.driver.internal.AbstractQueryRunner.parameters(AbstractQueryRunner.java:69)
at org.neo4j.driver.internal.AbstractQueryRunner.run(AbstractQueryRunner.java:43)
at org.neo4j.spark.service.SchemaService.retrieveSchemaFromApoc(SchemaService.scala:68)
at org.neo4j.spark.service.SchemaService.liftedTree2$1(SchemaService.scala:171)
at org.neo4j.spark.service.SchemaService.structForRelationship(SchemaService.scala:155)
at org.neo4j.spark.service.SchemaService.struct(SchemaService.scala:262)
at org.neo4j.spark.DataSource.$anonfun$inferSchema$1(DataSource.scala:41)
at org.neo4j.spark.DataSource.callSchemaService(DataSource.scala:29)
at org.neo4j.spark.DataSource.inferSchema(DataSource.scala:41)
at org.apache.spark.sql.execution.datasources.v2.DataSourceV2Utils$.getTableFromProvider(DataSourceV2Utils.scala:81)
at org.apache.spark.sql.DataFrameReader.$anonfun$load$1(DataFrameReader.scala:274)
at scala.Option.map(Option.scala:230)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:248)
at org.apache.spark.sql.DataFrameReader.load(DataFrameReader.scala:221)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.base/java.lang.Thread.run(Thread.java:829)

But when I run dfr.columns, I get this:
['<rel.id>', '<rel.type>', '<source.id>', '<source.labels>', 'source.name', 'source.born', '<target.id>', '<target.labels>', 'target.title', 'target.tagline', 'target.released', 'rel.roles']

However, I am not able to select any of those columns, e.g. with dfr.select('<rel.id>').
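I am aware that Spark parses dots in column names as struct field access, so names like <rel.id> normally need backtick escaping. For reference, this is a minimal sketch of the escaped form (assuming dfr was loaded as above):

# Column names containing dots must be backtick-escaped,
# otherwise Spark interprets 'rel.id' as field 'id' of struct 'rel'.
dfr.select("`<rel.id>`", "`<rel.type>`").show()

# Equivalent form using the functions API:
from pyspark.sql import functions as F
dfr.select(F.col("`<rel.id>`")).show()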

I have tried every push-down strategy for filters and columns that I could find, but all of them failed.
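For completeness, these are the kinds of variations I have been toggling (pushdown.filters.enabled is documented for the connector; I am assuming pushdown.columns.enabled behaves the same way in 4.0.1):

# Same read as above; only the pushdown options change.
dfr = (
    spark.read.format("org.neo4j.spark.DataSource")
    .option("url", "bolt://some-url:7687")
    .option("authentication.basic.username", "some-uname")
    .option("authentication.basic.password", "some-pass")
    .option("pushdown.filters.enabled", "false")   # disable filter pushdown
    .option("pushdown.columns.enabled", "false")   # disable column pushdown
    .option("relationship", "ACTED_IN")
    .option("relationship.source.labels", "Person")
    .option("relationship.target.labels", "Movie")
    .load()
)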