Secure your code as it's written. Use Snyk Code to scan source code in minutes - no build needed - and fix issues immediately.
def test_search_to_dataframe(self):
    """Default columns, head() limits, and exclusion filters behave as documented."""
    # Ids of every geography named "rotterdam" on the port layer.
    port_ids = [
        geo.id
        for geo in Geographies().search("rotterdam").to_list()
        if "port" in geo.layer
    ]
    frame = (
        VesselMovements()
        .search(
            filter_time_min=datetime(2017, 10, 1, 0, 0),
            filter_time_max=datetime(2017, 10, 1, 0, 10),
            filter_origins=port_ids,
        )
        .to_df()
        .head(2)
    )
    assert list(frame.columns) == vessel_movements_result.DEFAULT_COLUMNS
    assert len(frame) == 2

    wanted = [
        "vessel_movement_id",
        "vessel.name",
        "start_timestamp",
        "end_timestamp",
        "origin.location.country.id",
        "origin.location.country.label",
        "destination.location.country.id",
        "destination.location.country.label",
        "cargoes.0.product.group.label",
        "vessel.corporate_entities.charterer.id",
        "vessel.corporate_entities.charterer.label",
    ]
    # MEG origins, excluding Iraqi origins and the first Bahri charterer id.
    excl_frame = (
        VesselMovements()
        .search(
            filter_origins=meg,
            exclude_origins=iraq,
            exclude_charterers=bahri[0],
            filter_time_min=datetime(2019, 10, 15),
            filter_time_max=datetime(2019, 11, 1),
        )
        .to_df(columns=wanted)
    )
    # No surviving row may originate in Iraq or be chartered by Bahri.
    violates_exclusion = (
        excl_frame["origin.location.country.id"] == iraq[0]
    ) | (excl_frame["vessel.corporate_entities.charterer.id"] == bahri[0])
    assert excl_frame.loc[violates_exclusion].empty
def test_search_vessel_status(self):
    """A two-week ballast-only VLCC search returns a sizeable result set."""
    result = VesselMovements().search(
        filter_time_min=datetime(2019, 10, 1),
        filter_time_max=datetime(2019, 10, 15),
        filter_vessel_classes=["vlcc"],
        filter_vessel_status="vessel_status_ballast",
    )
    # Expect well over 50 ballast VLCC movements in this window.
    assert len(result) > 50
def test_filter_activity(self):
    """Filtering on the 'storing_state' activity still yields movements."""
    search = VesselMovements().search(
        filter_activity="storing_state",
        filter_time_min=datetime(2017, 10, 1),
        filter_time_max=datetime(2017, 10, 1),
    )
    frame = search.to_df().head(2)
    print(frame.head())
    assert len(frame) == 2
def test_search_to_dataframe_subset_of_columns(self):
    """to_df(columns=...) returns exactly the requested columns, in order."""
    wanted = ["vessel.imo", "vessel.name"]
    # Ids of every geography named "rotterdam" on the port layer.
    port_ids = [
        geo.id
        for geo in Geographies().search("rotterdam").to_list()
        if "port" in geo.layer
    ]
    frame = (
        VesselMovements()
        .search(
            filter_time_min=datetime(2017, 10, 1, 0, 0),
            filter_time_max=datetime(2017, 10, 1, 0, 10),
            filter_origins=port_ids,
        )
        .to_df(columns=wanted)
        .head(2)
    )
    assert list(frame.columns) == wanted
    assert len(frame) == 2
def test_age_flag_scrubbers_filters(self):
    """Scrubber, vessel-age, and flag filters combine in a single search."""
    # Ids of every geography named "panama" on the country layer.
    flag_ids = [
        geo.id
        for geo in Geographies().search("panama").to_list()
        if "country" in geo.layer
    ]
    frame = (
        VesselMovements()
        .search(
            filter_vessel_scrubbers="inc",
            filter_vessel_age_min=2,
            filter_vessel_age_max=15,
            filter_vessel_flags=flag_ids,
            filter_time_min=datetime(2017, 10, 1),
            filter_time_max=datetime(2017, 10, 1),
        )
        .to_df()
        .head(2)
    )
    print(frame.head())
    assert len(frame) == 2
def test_to_df_all_columns(self):
    """to_df(columns="all") materialises every available column without error."""
    # Ids of every geography named "rotterdam" on the port layer.
    port_ids = [
        geo.id
        for geo in Geographies().search("rotterdam").to_list()
        if "port" in geo.layer
    ]
    search = VesselMovements().search(
        filter_time_min=datetime(2017, 10, 1, 0, 0),
        filter_time_max=datetime(2017, 10, 1, 0, 10),
        filter_origins=port_ids,
    )
    frame = search.to_df(columns="all").head(2)
    assert len(frame) == 2
| 10 | ABDIAS NASCIMENTO | 9.4539e+06 | 710032990 | 171000 | 157055 | suezmax | Marlim Sul Field [BR] | nan | nan | nan | Sao Francisco Do Sul, SC [BR] | nan | nan | nan | 2017-09-28T18:29:45+0000 | 2017-10-04T23:05:32+0000 | Crude | nan | nan | nan | nan | PETROBRAS |
| 11 | ABIOLA | 8.61943e+06 | 657995000 | 47261 | 35644 | handysize | Port Harcourt [NG] | nan | nan | nan | nan | nan | nan | nan | 2014-12-02T10:20:03+0000 | nan | Clean products | Full Range | 0.490481 | nan | nan | nan |
| 13 | ABLIANI | 9.69307e+06 | 256903000 | 124518 | 109999 | aframax | Ceyhan [TR] | nan | nan | nan | Sarroch (Porto Foxi) [IT] | nan | nan | nan | 2017-09-26T21:33:43+0000 | 2017-10-03T15:45:15+0000 | Crude | Azeri Light | 1 | nan | nan | Eastern Mediterranean Maritime Ltd |
| 19 | AC-D | 9.42844e+06 | 256934000 | 8628 | 7842 | tiny_tanker | Varna [BG] | nan | nan | nan | Valencia [ES] | nan | nan | nan | 2017-09-20T08:00:58+0000 | 2017-10-06T15:49:00+0000 | Clean products | Finished Biodiesel | 0.868073 | nan | nan | nan |
| 20 | ACACIA | 9.4766e+06 | 371044000 | 14570 | 13566 | general_purpose | Bontang, KL [ID] | nan | nan | nan | Lianyungang [CN] | nan | nan | nan | 2017-09-28T13:13:57+0000 | 2017-10-07T00:30:46+0000 | Clean products | Chemicals | 0.999186 | nan | nan | KOKUKA SANGYO |
| 22 | ACACIA RUBRA | 9.46853e+06 | 249374000 | 6000 | 6065 | tiny_tanker | Mosjoen [NO] | nan | nan | nan | Sigerfjord [NO] | nan | nan | nan | 2017-10-01T00:01:53+0000 | 2017-10-06T10:53:20+0000 | Dirty products | nan | nan | nan | nan | nan |
"""
from datetime import datetime
from vortexasdk import VesselMovements

if __name__ == "__main__":
    # Query the API for one hour of vessel movements.
    search_result = VesselMovements().search(
        filter_time_min=datetime(2017, 10, 1, 0),
        filter_time_max=datetime(2017, 10, 1, 1),
    )
    # A complete list of available columns can be found at https://vortechsa.github.io/python-sdk/endpoints/vessel_movements/#notes
    # We only require a subset of available columns here
    # NOTE(review): this fragment appears truncated — the `required_columns`
    # list is never closed in the visible source and its consumer is missing;
    # confirm against the original example file.
    required_columns = [
        # Show metadata about the vessel
        "vessel.name",
        "vessel.imo",
        "vessel.mmsi",
        "vessel.cubic_capacity",
        "vessel.dwt",
        "vessel.vessel_class",
        # Show any corporate information associated with the vessel
        "vessel.corporate_entities.charterer.label",
"""
Let's find some ballast movements
"""
from datetime import datetime
from vortexasdk import VesselMovements
if __name__ == "__main__":
# Query the API
search_result = VesselMovements().search(
filter_time_min=datetime(2017, 10, 1, 0),
filter_time_max=datetime(2017, 10, 1, 1),
filter_vessel_status="vessel_status_ballast",
)
# Convert the search result to a dataframe
ballast_movements = search_result.to_df()
    # NOTE(review): the enclosing `def` lies outside this chunk — presumably
    # convert_to_corporation_ids(corporation_name); verify against the full file.
    # Keep only corporations whose name matches exactly (case-insensitive).
    return [c.id for c in corporations_with_encapsulating_names if c.name.upper() == corporation_name.upper()]
    # We could have chosen to keep the other corporations (with similar, but not exact names) like so:
    # return [c.id for c in corporations_with_encapsulating_names]
if __name__ == "__main__":
# Read our excel sheet of charterers into a dataframe
charterers_df = pd.read_excel("./resources/my_charterers.xlsx")
# Convert the charterer names into ids
charterers_list_of_lists = charterers_df['charterers'].apply(convert_to_corporation_ids).to_list()
charterers = [item for sublist in charterers_list_of_lists for item in sublist]
# Query API
df = VesselMovements().search(
filter_charterers=charterers,
filter_time_min=datetime.now() - timedelta(weeks=1),
filter_time_max=datetime.now(),
).to_df()
print(df)