ruff: Fix PLC0206 Extracting value from dictionary without calling .items().

Signed-off-by: Anders Kaseorg <anders@zulip.com>
Author: Anders Kaseorg
Date: 2024-12-20 12:18:06 -08:00
Committed by: Tim Abbott
Parent: 5bad79dd5b
Commit: 19b8cde27f
18 changed files with 87 additions and 117 deletions
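
For context on the change repeated across these files: ruff's PLC0206 rule flags loops that iterate over a dict's keys and then index back into the dict to fetch each value. Iterating with .items() (or .values(), when the key is unused) yields the same data without the redundant lookup. A minimal before/after sketch of the pattern, with illustrative names rather than code from this commit:

    counts = {"denmark": 3, "verona": 5}

    # Flagged by PLC0206: iterates over keys, then looks each value up again.
    for realm in counts:
        print(realm, counts[realm])

    # Preferred: unpack key and value together.
    for realm, count in counts.items():
        print(realm, count)

    # When only the values are needed, .values() drops the unused key.
    for count in counts.values():
        print(count)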


@@ -79,8 +79,8 @@ def get_realm_day_counts() -> dict[str, dict[str, Markup]]:
         return Markup('<td class="number {good_bad}">{cnt}</td>').format(good_bad=good_bad, cnt=cnt)

     result = {}
-    for string_id in counts:
-        raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
+    for string_id, realm_counts in counts.items():
+        raw_cnts = [realm_counts.get(age, 0) for age in range(8)]
         min_cnt = min(raw_cnts[1:])
         max_cnt = max(raw_cnts[1:])


@@ -68,9 +68,9 @@ def main() -> None:
     metrics = metrics_file["metrics"]

     while True:
-        for metric_id in metrics:
+        for metric_id, query in metrics.items():
             try:
-                update_metric(metric_id, metrics.get(metric_id), page_id, oauth_token)
+                update_metric(metric_id, query, page_id, oauth_token)
             except Exception as e:
                 logging.exception(e)
         time.sleep(30)


@@ -54,7 +54,7 @@ for line in output.split("\n"):
 now = int(time.time())

-for queue_name in consumers:
+for queue_name, count in consumers.items():
     target_count = 1
     if queue_name == "notify_tornado":
         target_count = TORNADO_PROCESSES
@@ -67,6 +67,6 @@ for queue_name in consumers:
     atomic_nagios_write(
         "check-rabbitmq-consumers-" + queue_name,
-        "critical" if consumers[queue_name] < target_count else "ok",
-        f"queue {queue_name} has {consumers[queue_name]} consumers, needs {target_count}",
+        "critical" if count < target_count else "ok",
+        f"queue {queue_name} has {count} consumers, needs {target_count}",
     )


@@ -212,17 +212,17 @@ def update_for_legacy_stream_translations(
 ) -> None:
     number_of_updates = 0
     updated_translations: dict[str, str] = {}
-    for line in current:
+    for line, translation in current.items():
         # If the string has a legacy string mapped and see if it's
         # not currently translated (e.g. an empty string), then use
         # the legacy translated string (which might be an empty string).
-        if line in LEGACY_STRINGS_MAP and current[line] == "":
+        if line in LEGACY_STRINGS_MAP and translation == "":
             legacy_string = LEGACY_STRINGS_MAP[line]
             if legacy_string in legacy:
                 updated_translations[line] = legacy[legacy_string]
                 number_of_updates += 1
         else:
-            updated_translations[line] = current[line]
+            updated_translations[line] = translation

     # Only replace file content if we've made any updates for legacy
     # translated strings.


@@ -25,12 +25,14 @@ langs = {
     for alias in aliases
 }

-for name in priorities:
-    if langs.get(name) is None:
-        langs[name] = {
-            "priority": priorities[name],
-            "pretty_name": name,
-        }
+langs |= {
+    name: {
+        "priority": priority,
+        "pretty_name": name,
+    }
+    for name, priority in priorities.items()
+    if name not in langs
+}

 with open(OUT_PATH, "w") as f:
     json.dump({"langs": langs}, f)
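
This hunk goes beyond the mechanical .items() rewrite: the loop becomes a dict comprehension merged into langs with the |= union-update operator (dicts support | and |= since Python 3.9, PEP 584). A standalone sketch of the idiom, using hypothetical data rather than the script's real inputs:

    langs = {"python": {"priority": 1, "pretty_name": "python"}}
    priorities = {"python": 99, "rust": 2}

    # |= updates langs in place; the guard leaves existing entries intact.
    langs |= {
        name: {"priority": priority, "pretty_name": name}
        for name, priority in priorities.items()
        if name not in langs
    }
    assert langs["python"]["priority"] == 1
    assert langs["rust"]["priority"] == 2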


@@ -87,8 +87,8 @@ def generate_emoji_catalog(
     # Sort the emojis according to iamcal's sort order. This sorting determines the
     # order in which emojis will be displayed in emoji picker.
-    for category in emoji_catalog:
-        emoji_catalog[category].sort(key=lambda emoji_code: sort_order[emoji_code])
+    for emoji_codes in emoji_catalog.values():
+        emoji_codes.sort(key=lambda emoji_code: sort_order[emoji_code])

     return dict(emoji_catalog)


@@ -140,10 +140,8 @@ def generate_emoji_code_to_emoji_names_maps() -> None:
         else:
             reverse_unified_reactions_map[emoji_code] = [name]

-    for emoji_code in reverse_unified_reactions_map:
-        emoji_code_to_gemoji_names[emoji_code] = ", ".join(
-            reverse_unified_reactions_map[emoji_code]
-        )
+    for emoji_code, names in reverse_unified_reactions_map.items():
+        emoji_code_to_gemoji_names[emoji_code] = ", ".join(names)

     # Prepare iamcal names map.
     for emoji_dict in EMOJI_DATA:


@@ -447,11 +447,11 @@ def run_tests_via_node_js() -> int:
 def check_line_coverage(
     fn: str, line_coverage: dict[Any, Any], line_mapping: dict[Any, Any], log: bool = True
 ) -> bool:
-    missing_lines = []
-    for line in line_coverage:
-        if line_coverage[line] == 0:
-            actual_line = line_mapping[line]
-            missing_lines.append(str(actual_line["start"]["line"]))
+    missing_lines = [
+        str(line_mapping[line]["start"]["line"])
+        for line, coverage in line_coverage.items()
+        if coverage == 0
+    ]
     if missing_lines:
         if log:
             print_error(f"{fn} no longer has complete node test coverage")
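
Several hunks, like this one, fold a filter-and-append loop into a single comprehension over .items(). The shape of that transformation, shown with stand-in data rather than the test runner's real coverage structures:

    line_coverage = {1: 3, 2: 0, 3: 1}
    line_mapping = {n: {"start": {"line": n}} for n in line_coverage}

    # Collect the mapped line numbers of lines with zero coverage.
    missing_lines = [
        str(line_mapping[line]["start"]["line"])
        for line, coverage in line_coverage.items()
        if coverage == 0
    ]
    assert missing_lines == ["2"]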


@@ -267,11 +267,11 @@ def get_realm_authentication_methods_for_page_params_api(
     from corporate.models import CustomerPlan

-    for backend_name in result_dict:
+    for backend_name, backend_result in result_dict.items():
         available_for = AUTH_BACKEND_NAME_MAP[backend_name].available_for_cloud_plans

         if available_for is not None and realm.plan_type not in available_for:
-            result_dict[backend_name]["available"] = False
+            backend_result["available"] = False

             required_upgrade_plan_number = min(
                 set(available_for).intersection({Realm.PLAN_TYPE_STANDARD, Realm.PLAN_TYPE_PLUS})
@@ -286,11 +286,11 @@ def get_realm_authentication_methods_for_page_params_api(
                     CustomerPlan.TIER_CLOUD_PLUS
                 )

-            result_dict[backend_name]["unavailable_reason"] = _(
+            backend_result["unavailable_reason"] = _(
                 "You need to upgrade to the {required_upgrade_plan_name} plan to use this authentication method."
             ).format(required_upgrade_plan_name=required_upgrade_plan_name)
         else:
-            result_dict[backend_name]["available"] = True
+            backend_result["available"] = True

     return result_dict


@@ -160,8 +160,7 @@ def convert_channel_data(
         channel_members_map[channel_name] = []
         channel_admins_map[channel_name] = []

-    for username in user_data_map:
-        user_dict = user_data_map[username]
+    for username, user_dict in user_data_map.items():
         teams = user_dict["teams"]
         if user_dict["teams"] is None:
             continue
@@ -844,8 +843,7 @@ def label_mirror_dummy_users(
 def reset_mirror_dummy_users(username_to_user: dict[str, dict[str, Any]]) -> None:
-    for username in username_to_user:
-        user = username_to_user[username]
+    for user in username_to_user.values():
         user["is_mirror_dummy"] = False


@@ -64,8 +64,7 @@ def process_users(
     realm_owners: list[int] = []
     bots: list[int] = []

-    for rc_user_id in user_id_to_user_map:
-        user_dict = user_id_to_user_map[rc_user_id]
+    for rc_user_id, user_dict in user_id_to_user_map.items():
         is_mirror_dummy = False
         is_bot = False
         is_active = True
@@ -166,9 +165,7 @@ def convert_channel_data(
 ) -> list[ZerverFieldsT]:
     streams = []

-    for rc_room_id in room_id_to_room_map:
-        channel_dict = room_id_to_room_map[rc_room_id]
-
+    for rc_room_id, channel_dict in room_id_to_room_map.items():
         date_created = float(channel_dict["ts"].timestamp())
         stream_id = stream_id_mapper.get(rc_room_id)
         invite_only = channel_dict["t"] == "p"
@@ -214,9 +211,7 @@ def convert_stream_subscription_data(
 ) -> None:
     stream_members_map: dict[int, set[int]] = {}

-    for rc_user_id in user_id_to_user_map:
-        user_dict = user_id_to_user_map[rc_user_id]
-
+    for rc_user_id, user_dict in user_id_to_user_map.items():
         if not user_dict.get("__rooms"):
             continue
@@ -249,11 +244,10 @@ def convert_direct_message_group_data(
 ) -> list[ZerverFieldsT]:
     zerver_direct_message_group: list[ZerverFieldsT] = []

-    for rc_direct_message_group_id in direct_message_group_id_to_direct_message_group_map:
-        direct_message_group_dict = direct_message_group_id_to_direct_message_group_map[
-            rc_direct_message_group_id
-        ]
-
+    for (
+        rc_direct_message_group_id,
+        direct_message_group_dict,
+    ) in direct_message_group_id_to_direct_message_group_map.items():
         direct_message_group_id = direct_message_group_id_mapper.get(rc_direct_message_group_id)
         direct_message_group = build_direct_message_group(
             direct_message_group_id, len(direct_message_group_dict["uids"])
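
The last hunk above uses a less common spelling of the same fix: when the unpacked names are too long for one line, the tuple target of the for statement can be parenthesized and wrapped. A toy example with hypothetical names:

    groups = {"g1": {"uids": [1, 2]}, "g2": {"uids": [3]}}

    for (
        direct_message_group_id,
        direct_message_group_dict,
    ) in groups.items():
        print(direct_message_group_id, len(direct_message_group_dict["uids"]))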


@@ -238,10 +238,10 @@ def cache_set_many(
     items: dict[str, Any], cache_name: str | None = None, timeout: int | None = None
 ) -> None:
     new_items = {}
-    for key in items:
+    for key, item in items.items():
         new_key = KEY_PREFIX + key
         validate_cache_key(new_key)
-        new_items[new_key] = items[key]
+        new_items[new_key] = item
     items = new_items
     remote_cache_stats_start()
     get_cache_backend(cache_name).set_many(items, timeout=timeout)


@@ -184,80 +184,64 @@ class APIReturnValuesTablePreprocessor(Preprocessor):
     def render_table(self, return_values: dict[str, Any], spacing: int) -> list[str]:
         IGNORE = ["result", "msg", "ignored_parameters_unsupported"]
         ans = []
-        for return_value in return_values:
+        for return_value, schema in return_values.items():
             if return_value in IGNORE:
                 continue
-            if "oneOf" in return_values[return_value]:
+            if "oneOf" in schema:
                 # For elements using oneOf there are two descriptions. The first description
                 # should be at level with the oneOf and should contain the basic non-specific
                 # description of the endpoint. Then for each element of oneOf there is a
                 # specialized description for that particular case. The description used
                 # right below is the main description.
-                data_type = generate_data_type(return_values[return_value])
+                data_type = generate_data_type(schema)
                 ans.append(
-                    self.render_desc(
-                        return_values[return_value]["description"], spacing, data_type, return_value
-                    )
+                    self.render_desc(schema["description"], spacing, data_type, return_value)
                 )
-                ans += self.render_oneof_block(return_values[return_value], spacing + 4)
+                ans += self.render_oneof_block(schema, spacing + 4)
                 continue
-            description = return_values[return_value]["description"]
-            data_type = generate_data_type(return_values[return_value])
-            check_deprecated_consistency(
-                return_values[return_value].get("deprecated", False), description
-            )
+            description = schema["description"]
+            data_type = generate_data_type(schema)
+            check_deprecated_consistency(schema.get("deprecated", False), description)
             ans.append(self.render_desc(description, spacing, data_type, return_value))
-            if "properties" in return_values[return_value]:
-                ans += self.render_table(return_values[return_value]["properties"], spacing + 4)
-            if return_values[return_value].get("additionalProperties", False):
-                data_type = generate_data_type(return_values[return_value]["additionalProperties"])
+            if "properties" in schema:
+                ans += self.render_table(schema["properties"], spacing + 4)
+            if schema.get("additionalProperties", False):
+                data_type = generate_data_type(schema["additionalProperties"])
                 ans.append(
                     self.render_desc(
-                        return_values[return_value]["additionalProperties"]["description"],
+                        schema["additionalProperties"]["description"],
                         spacing + 4,
                         data_type,
                     )
                 )
-                if "properties" in return_values[return_value]["additionalProperties"]:
+                if "properties" in schema["additionalProperties"]:
                     ans += self.render_table(
-                        return_values[return_value]["additionalProperties"]["properties"],
+                        schema["additionalProperties"]["properties"],
                         spacing + 8,
                     )
-                elif "oneOf" in return_values[return_value]["additionalProperties"]:
-                    ans += self.render_oneof_block(
-                        return_values[return_value]["additionalProperties"], spacing + 8
-                    )
-                elif return_values[return_value]["additionalProperties"].get(
-                    "additionalProperties", False
-                ):
+                elif "oneOf" in schema["additionalProperties"]:
+                    ans += self.render_oneof_block(schema["additionalProperties"], spacing + 8)
+                elif schema["additionalProperties"].get("additionalProperties", False):
                     data_type = generate_data_type(
-                        return_values[return_value]["additionalProperties"]["additionalProperties"]
+                        schema["additionalProperties"]["additionalProperties"]
                     )
                     ans.append(
                         self.render_desc(
-                            return_values[return_value]["additionalProperties"][
-                                "additionalProperties"
-                            ]["description"],
+                            schema["additionalProperties"]["additionalProperties"]["description"],
                             spacing + 8,
                             data_type,
                         )
                     )
                     ans += self.render_table(
-                        return_values[return_value]["additionalProperties"]["additionalProperties"][
-                            "properties"
-                        ],
+                        schema["additionalProperties"]["additionalProperties"]["properties"],
                         spacing + 12,
                     )
-            if "items" in return_values[return_value]:
-                if "properties" in return_values[return_value]["items"]:
-                    ans += self.render_table(
-                        return_values[return_value]["items"]["properties"], spacing + 4
-                    )
-                elif "oneOf" in return_values[return_value]["items"]:
-                    ans += self.render_oneof_block(
-                        return_values[return_value]["items"], spacing + 4
-                    )
+            if "items" in schema:
+                if "properties" in schema["items"]:
+                    ans += self.render_table(schema["items"]["properties"], spacing + 4)
+                elif "oneOf" in schema["items"]:
+                    ans += self.render_oneof_block(schema["items"], spacing + 4)
         return ans

     def generate_event_strings(self, event_data: EventData) -> list[str]:


@@ -55,16 +55,15 @@ def create_role_based_system_groups(
             # failure, and had already processed this realm.
             continue

-        role_system_groups_dict = {}
-        for role in SYSTEM_USER_GROUP_ROLE_MAP:
-            user_group_params = SYSTEM_USER_GROUP_ROLE_MAP[role]
-            user_group = UserGroup(
+        role_system_groups_dict = {
+            role: UserGroup(
                 name=user_group_params["name"],
                 description=user_group_params["description"],
                 realm=realm,
                 is_system_group=True,
             )
-            role_system_groups_dict[role] = user_group
+            for role, user_group_params in SYSTEM_USER_GROUP_ROLE_MAP.items()
+        }

         full_members_system_group = UserGroup(
             name="@role:fullmembers",


@@ -64,16 +64,15 @@ def create_role_based_system_groups_for_internal_realms(
         # failure, and had already created groups.
         return

-    role_system_groups_dict = {}
-    for role in SYSTEM_USER_GROUP_ROLE_MAP:
-        user_group_params = SYSTEM_USER_GROUP_ROLE_MAP[role]
-        user_group = UserGroup(
+    role_system_groups_dict = {
+        role: UserGroup(
             name=user_group_params["name"],
             description=user_group_params["description"],
             realm=realm,
             is_system_group=True,
         )
-        role_system_groups_dict[role] = user_group
+        for role, user_group_params in SYSTEM_USER_GROUP_ROLE_MAP.items()
+    }

     full_members_system_group = UserGroup(
         name="@role:fullmembers",


@@ -481,11 +481,11 @@ class BaseAction(ZulipTestCase):
                 print(json.dumps(event, indent=4))
             print("\nMISMATCHES:\n")
-            for k in state1:
-                if state1[k] != state2[k]:
+            for k, v1 in state1.items():
+                if v1 != state2[k]:
                     print("\nkey = " + k)
                     try:
-                        self.assertEqual({k: state1[k]}, {k: state2[k]})
+                        self.assertEqual({k: v1}, {k: state2[k]})
                     except AssertionError as e:
                         print(e)
                     print(


@@ -752,7 +752,6 @@ class PermissionTest(ZulipTestCase):
     def test_admin_user_can_change_profile_data(self) -> None:
         realm = get_realm("zulip")
         self.login("iago")
-        new_profile_data = []
         cordelia = self.example_user("cordelia")

         # Setting editable_by_user to false shouldn't affect admin's ability
@@ -780,14 +779,13 @@ class PermissionTest(ZulipTestCase):
             "Pronouns": "she/her",
         }
-        for field_name in fields:
-            field = CustomProfileField.objects.get(name=field_name, realm=realm)
-            new_profile_data.append(
-                {
-                    "id": field.id,
-                    "value": fields[field_name],
-                }
-            )
+        new_profile_data = [
+            {
+                "id": CustomProfileField.objects.get(name=field_name, realm=realm).id,
+                "value": value,
+            }
+            for field_name, value in fields.items()
+        ]

         result = self.client_patch(
             f"/json/users/{cordelia.id}", {"profile_data": orjson.dumps(new_profile_data).decode()}


@@ -237,9 +237,7 @@ class MissedMessageWorker(QueueProcessingWorker):
                 trigger=event.trigger, mentioned_user_group_id=event.mentioned_user_group_id
             )

-        for user_profile_id in events_by_recipient:
-            events = events_by_recipient[user_profile_id]
-
+        for user_profile_id, events in events_by_recipient.items():
             logging.info(
                 "Batch-processing %s missedmessage_emails events for user %s",
                 len(events),